device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


+# TODO: Remove with next version
def download_file(path):
-    dest = os.path.join(tempdir, path)
-    if not os.path.exists(dest):
-        os.makedirs(os.path.dirname(dest), exist_ok=True)
-        multiple.download_file(path, dest)
-    return dest
+    return multiple.cache_file(path)


def read_image(path):
-    dest = download_file(path)
+    dest = multiple.cache_file(path)
    return Image.open(dest).convert('RGB')


@@ -81,7 +78,7 @@ def save_model(path, model, device='cpu'):


def load_model(path, class_names):
-    dest = download_file(path)
+    dest = multiple.cache_file(path)
    model = create_model(class_names, pretrained=False)
    model.load_state_dict(torch.load(dest))
    return model
@@ -98,12 +95,14 @@ def export_onnx(path, model, device='cpu'):
    multiple.upload_file(src, path)


+# TODO: Remove with next version
def save_json(path, values):
-    multiple.save_file(path, json.dumps(values))
+    multiple.save_json(path, values)


+# TODO: Remove with next version
def load_json(path):
-    return json.loads(multiple.load_file(path))
+    return multiple.load_json(path)


transform = transforms.Compose([
@@ -112,8 +111,7 @@ def load_json(path):
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
-        std=[0.229, 0.224, 0.225]
-    )
+        std=[0.229, 0.224, 0.225])
])


@@ -145,27 +143,22 @@ def train_model(model, dataloaders, criterion=None, optimizer=None, scheduler=No

            running_loss = 0.0
            running_corrects = 0
-
-            # Iterate over data.
+            # Iterate over data
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
-
                # zero the parameter gradients
                optimizer.zero_grad()
-
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
-
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
-
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
@@ -174,20 +167,17 @@ def train_model(model, dataloaders, criterion=None, optimizer=None, scheduler=No

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
-
            print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
-
+
        print()
-
    time_elapsed = time.time() - since
    print(f'Training complete in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s')
    print(f'Best val Acc: {best_acc:4f}')
-
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
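
A minimal usage sketch of the refactored helpers above, for reference only: the file paths are hypothetical, and it assumes multiple.cache_file, multiple.save_json, and multiple.load_json behave as the new wrappers suggest.

    # hypothetical paths; load_json/load_model/read_image/transform come from this module
    class_names = load_json('models/classes.json')            # delegates to multiple.load_json
    model = load_model('models/best.pth', class_names).to(device).eval()

    image = read_image('samples/example.jpg')                  # fetched via multiple.cache_file
    batch = transform(image).unsqueeze(0).to(device)           # preprocess and add batch dim

    with torch.no_grad():
        pred = torch.argmax(model(batch), dim=1).item()
    print(class_names[pred])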