-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtemplate_model.py
More file actions
79 lines (66 loc) · 2.1 KB
/
template_model.py
File metadata and controls
79 lines (66 loc) · 2.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import os
import shutil
import torch
from torch import nn
# Define your model here.
def build_model() -> nn.Module:
    """Construct the classifier: flatten a 28x28 image, one hidden layer, 10 logits."""
    layers = [
        nn.Flatten(),
        nn.Linear(28 * 28, 128),
        nn.ReLU(),
        nn.Linear(128, 10),
    ]
    return nn.Sequential(*layers)
# Provide a sample input matching your model's expected input shape.
def get_dummy_input() -> torch.Tensor:
    """Return one random MNIST-shaped example: (batch=1, channels=1, 28, 28)."""
    batch, channels, height, width = 1, 1, 28, 28
    return torch.randn(batch, channels, height, width)
def _get_device(requested: str) -> torch.device:
if requested == "cuda" and torch.cuda.is_available():
return torch.device("cuda:0")
if requested == "cpu":
return torch.device("cpu")
return torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
# Training settings live in the model file.
EPOCHS = 1  # number of passes of the synthetic-data training loop
BATCH_SIZE = 32  # samples per training step
DEVICE = "auto"  # "cpu", "cuda", or "auto" (CUDA when available, else CPU — see _get_device)
def train_and_export(
    output_dir: str,
    temp_models_dir: str,
) -> str:
    """Train the model and export it to ONNX.

    main.py calls this function; keep training logic inside models/.

    Args:
        output_dir: Final directory for ``model.onnx`` (created if missing).
        temp_models_dir: Scratch directory for the intermediate ONNX file
            (created if missing).

    Returns:
        Path to the exported ``model.onnx`` inside ``output_dir``.
    """
    device_obj = _get_device(DEVICE)
    model = build_model().to(device_obj)
    _train(model, device_obj)
    return _export_onnx(model, output_dir, temp_models_dir)


def _train(model: nn.Module, device_obj: torch.device) -> None:
    """Run the example training loop on synthetic (random) data, in place."""
    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    model.train()  # explicit, in case a caller left the model in eval mode
    for epoch in range(1, EPOCHS + 1):
        # One synthetic batch per epoch — replace with a real DataLoader as needed.
        inputs = torch.randn(BATCH_SIZE, 1, 28, 28, device=device_obj)
        labels = torch.randint(0, 10, (BATCH_SIZE,), device=device_obj)
        optimizer.zero_grad()
        logits = model(inputs)
        loss = loss_fn(logits, labels)
        loss.backward()
        optimizer.step()
        print(f"Epoch {epoch}/{EPOCHS} | loss {loss.item():.4f}")


def _export_onnx(model: nn.Module, output_dir: str, temp_models_dir: str) -> str:
    """Export ``model`` to a temp ONNX file, then copy it into ``output_dir``."""
    os.makedirs(output_dir, exist_ok=True)
    model.eval()
    model_cpu = model.to("cpu")  # export on CPU so the graph carries no device deps
    dummy_input = get_dummy_input().to("cpu")
    onnx_path = os.path.join(output_dir, "model.onnx")
    os.makedirs(temp_models_dir, exist_ok=True)
    temp_name = f"{os.path.basename(output_dir)}_temp.onnx"
    temp_onnx_path = os.path.join(temp_models_dir, temp_name)
    # Export to a temp ONNX and then copy to the final output folder.
    torch.onnx.export(
        model_cpu,
        dummy_input,
        temp_onnx_path,
        export_params=True,
        opset_version=13,
        do_constant_folding=True,
        input_names=["input"],
        output_names=["logits"],
        # Batch dimension stays dynamic so any batch size works at inference time.
        dynamic_axes={"input": {0: "batch"}, "logits": {0: "batch"}},
    )
    shutil.copy2(temp_onnx_path, onnx_path)
    return onnx_path