forked from yearing1017/UAVI_SEMANTIC_SEG
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrain_fold.py
More file actions
115 lines (103 loc) · 4.67 KB
/
train_fold.py
File metadata and controls
115 lines (103 loc) · 4.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from MyData_kfold import train_dataloader,val_dataloader
from deeplabv3 import resnet50, resnet101,resnet152, ResNet
from tensorboardX import SummaryWriter
from MIouv0217 import Evaluator
def train(epoch = 400):
    """Run 5-fold cross-validated training of a DeepLabV3 (ResNet-152) model.

    Each epoch trains and validates on all 5 folds, logs per-epoch average
    loss / pixel accuracy / mIoU to TensorBoard and a text log, and saves a
    checkpoint whenever the 5-fold mean mIoU improves.

    Args:
        epoch: total number of epochs to run (default 400).
    """
    # Metric evaluator for 4 semantic classes.
    evaluator = Evaluator(4)
    # Best 5-fold mean mIoU seen so far; checkpointing is gated on this.
    best_pred = 0.0
    writer = SummaryWriter('tblog/deeplabv3_v0304')
    # Prefer the second GPU when CUDA is available.
    device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
    # Build the model.
    deeplabv3_model = resnet152()
    #deeplabv3_model = torch.load('checkpoints/deeplabv3_model_90.pt')
    deeplabv3_model = deeplabv3_model.to(device)
    # CrossEntropyLoss handles the multi-class segmentation target.
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = optim.Adam(deeplabv3_model.parameters(), lr=1e-3)
    for epo in range(epoch):
        # Loss/metric accumulators over all 5 folds of this epoch.
        train_loss = 0.0
        val_loss = 0.0
        val_acc = 0.0
        val_miou = 0.0
        # Count batches across folds so the epoch averages below are exact.
        # (The original divided by the length of the LAST fold's dataloader
        # only, which is wrong when fold sizes differ.)
        train_batches = 0
        val_batches = 0
        for i in range(5):
            # ---- training pass on fold i ----
            deeplabv3_model.train()
            for index, (image, label) in enumerate(train_dataloader[i]):
                image = image.to(device)
                label = label.to(device)
                optimizer.zero_grad()
                output = deeplabv3_model(image)
                loss = criterion(output, label)
                loss.backward()
                iter_loss = loss.item()  # detach scalar for logging
                train_loss += iter_loss
                optimizer.step()
                train_batches += 1
                # Log every 24th batch.
                if np.mod(index, 24) == 0:
                    line = "epoch {}_{}, {}/{},train loss is {}".format(epo, i, index, len(train_dataloader[i]), iter_loss)
                    print(line)
                    # Append progress to the text log file.
                    with open('log/logs_v0304.txt', 'a') as f :
                        f.write(line)
                        f.write('\r\n')
            # ---- validation pass on fold i ----
            deeplabv3_model.eval()
            with torch.no_grad():
                for index, (image, label) in enumerate(val_dataloader[i]):
                    image = image.to(device)
                    label = label.to(device)
                    output = deeplabv3_model(image)
                    loss = criterion(output, label)
                    val_loss += loss.item()
                    val_batches += 1
                    # Accumulate the confusion matrix over the whole fold.
                    pred = np.argmax(output.cpu().numpy(), axis=1)
                    evaluator.add_batch(label.cpu().numpy(), pred)
            # Read the fold-level metrics ONCE after all batches are added.
            # (The original summed the cumulative Acc/mIoU on every batch and
            # then divided by 5, which inflated the reported averages.)
            val_acc += evaluator.Pixel_Accuracy()
            val_miou += evaluator.Mean_Intersection_over_Union()
            # Reset the confusion matrix so the next fold starts clean.
            evaluator.reset()
        line_epoch = "epoch train loss = %.3f, epoch val loss = %.3f" % (train_loss / max(train_batches, 1), val_loss / max(val_batches, 1))
        print(line_epoch)
        with open('log/logs_v0304.txt', 'a') as f :
            f.write(line_epoch)
            f.write('\r\n')
        # TensorBoard scalars: per-epoch averages.
        writer.add_scalar('train_loss', train_loss / max(train_batches, 1), epo)
        writer.add_scalar('val_loss', val_loss / max(val_batches, 1), epo)
        writer.add_scalar('val_Acc', val_acc / 5, epo)
        writer.add_scalar('val_mIoU', val_miou / 5, epo)
        # Save a checkpoint whenever the 5-fold mean mIoU improves.
        new_pred = val_miou / 5
        if new_pred > best_pred:
            best_pred = new_pred
            #torch.save(deeplabv3_model.state_dict(), 'models_v0304/deeplabv3_{}.pth'.format(epo))
            torch.save(deeplabv3_model, 'checkpoints_v0304/deeplabv3_model_{}.pt'.format(epo))
    # Flush pending TensorBoard events.
    writer.close()
# Script entry point: run a short 40-epoch training session.
if __name__ == "__main__":
    train(epoch=40)