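"""Training script for the AAMS semi-supervised segmentation network.

Builds labeled/unlabeled dataloaders with a two-stream batch sampler, trains
AAMS with SGD and cosine-annealed learning rate, evaluates on the validation
split every epoch, and keeps the checkpoint with the best validation IoU.
"""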
import argparse
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from albumentations.augmentations import transforms  # provides Normalize used in the pipelines below
from albumentations.core.composition import Compose
from albumentations import RandomRotate90, Resize
from src.utils.util import AverageMeter
from src.utils.metrics import iou_score
from src.utils.encode import one_hot_encoder
from src.utils.augment import medical_augmenter
from src.utils import ramps
from src.utils.dataset import (SemiDataSets, TwoStreamBatchSampler)
from src.network.AAMS import AAMS
from torch.optim.lr_scheduler import CosineAnnealingLR
import os
from src.utils.losses import fusion_bce_dice, loss_function
import torch.nn.functional as F
def seed_torch(seed):
    """Seed all relevant RNGs and make cuDNN deterministic for reproducible runs."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
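# Command-line configuration. `semi_percent` is the fraction of the training
# split treated as labeled; `labeled_bs` is the labeled portion of each batch.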
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default="AAMS", help='model')
parser.add_argument('--num_class', type=int, default=4, help='number of object classes')
parser.add_argument('--semi_percent', type=float, default=0.8, help='fraction of the training set used as labeled data')
parser.add_argument('--base_dir', type=str, default="./data/ungt", help='dataset root directory')
parser.add_argument('--train_file_dir', type=str, default="train.txt", help='training split list file')
parser.add_argument('--val_file_dir', type=str, default="val.txt", help='validation split list file')
parser.add_argument('--img_size', type=int, default=224, help='image size')
parser.add_argument('--max_iterations', type=int, default=20000, help='maximum number of training iterations')
parser.add_argument('--total_batch_size', type=int, default=8, help='batch size per gpu')
parser.add_argument('--base_lr', type=float, default=0.01, help='network learning rate')
parser.add_argument('--wei_flag', type=bool, default=True, help='category weighting flag')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--labeled_bs', type=int, default=4, help='labeled batch size per gpu')
parser.add_argument('--con', type=float, default=7, help='consistency')
parser.add_argument('--con_ram', type=float, default=200.0, help='consistency rampup')
parser.add_argument('--kernel_size', type=int, default=7, help='RREC kernel size')
parser.add_argument('--length', type=tuple, default=(3, 3, 3), help='length of RREC')
args = parser.parse_args()
seed_torch(args.seed)
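# Dataloaders: the training indices are split into labeled and unlabeled pools
# by `semi_percent`, and TwoStreamBatchSampler fills each batch with `labeled_bs`
# labeled samples followed by (total_batch_size - labeled_bs) unlabeled ones,
# so the supervised and unsupervised loss terms can be computed on fixed
# slices of every batch.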
def getDataloader(args):
    train_transform = Compose([
        medical_augmenter(level=5),
        transforms.Normalize(),
    ])
    val_transform = Compose([
        transforms.Normalize(),
    ])
    labeled_slice = args.semi_percent
    db_train = SemiDataSets(base_dir=args.base_dir, split="train", transform=train_transform,
                            train_file_dir=args.train_file_dir, val_file_dir=args.val_file_dir)
    db_val = SemiDataSets(base_dir=args.base_dir, split="val", transform=val_transform,
                          train_file_dir=args.train_file_dir, val_file_dir=args.val_file_dir)

    def worker_init_fn(worker_id):
        random.seed(args.seed + worker_id)

    total_slices = len(db_train)
    labeled_idxs = list(range(0, int(labeled_slice * total_slices)))
    unlabeled_idxs = list(range(int(labeled_slice * total_slices), total_slices))
    print("Labeled: {} ({}%), unlabeled: {}".format(len(labeled_idxs), labeled_slice * 100, len(unlabeled_idxs)))
    batch_sampler = TwoStreamBatchSampler(labeled_idxs, unlabeled_idxs, args.total_batch_size, args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=0, pin_memory=False, worker_init_fn=worker_init_fn)
    valloader = DataLoader(db_val, batch_size=args.total_batch_size, shuffle=False, num_workers=0)
    return trainloader, valloader
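# Training loop. Each batch holds `labeled_bs` labeled samples first and the
# unlabeled samples after them: loss_function receives the full batch together
# with the labeled_bs boundary, while IoU and the supervised loss meters are
# tracked on the labeled slice only. The checkpoint with the best validation
# IoU is kept under ./checkpoint/.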
def train(args):
    max_iterations = int(args.max_iterations * args.semi_percent)
    trainloader, valloader = getDataloader(args)
    model = AAMS(args.num_class, length=args.length, k=args.kernel_size).cuda()
    optimizer = optim.SGD(model.parameters(), lr=args.base_lr, momentum=0.9, weight_decay=0.0001)
    best_iou, iter_num = 0, 0
    max_epoch = max_iterations // len(trainloader) + 1
    scheduler = CosineAnnealingLR(optimizer, T_max=max_epoch, verbose=False)
    for epoch_num in range(max_epoch):
        avg_meters = {'train_loss': AverageMeter(),
                      'fusion_loss': AverageMeter(),
                      'sensitivity_loss': AverageMeter(),
                      'unsupervised_loss': AverageMeter(),
                      'train_iou': AverageMeter(),
                      'val_loss': AverageMeter(),
                      'val_iou': AverageMeter(),
                      'val_dsc': AverageMeter(),
                      'val_sen': AverageMeter(),
                      'val_pre': AverageMeter(),
                      'val_fos': AverageMeter(),
                      'val_spe': AverageMeter(),
                      'val_acc': AverageMeter()}
        model.train()
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            out_main, out_aux1, out_aux2, out_aux3 = model(volume_batch)
            label_batch = one_hot_encoder(args.num_class, label_batch, args.base_dir)
            tr_loss, fu_loss, se_loss, un_loss = loss_function(
                out_main, out_aux1, out_aux2, out_aux3, label_batch, iter_num,
                args.con, args.con_ram, args.labeled_bs, args.num_class, args.wei_flag)
            optimizer.zero_grad()
            tr_loss.backward()
            optimizer.step()
            iter_num += 1  # advance the consistency ramp-up counter passed to loss_function
            iou, _, _, _, _, _, _ = iou_score(out_main[:args.labeled_bs], label_batch[:args.labeled_bs], args.num_class)
            avg_meters['train_loss'].update(tr_loss, volume_batch[:args.labeled_bs].size(0))
            avg_meters['fusion_loss'].update(fu_loss, volume_batch[:args.labeled_bs].size(0))
            avg_meters['sensitivity_loss'].update(se_loss, volume_batch[:args.labeled_bs].size(0))
            avg_meters['unsupervised_loss'].update(un_loss, volume_batch[args.labeled_bs:].size(0))
            avg_meters['train_iou'].update(np.mean(iou), volume_batch[:args.labeled_bs].size(0))
        scheduler.step()
        model.eval()
        save_str = f"{args.model}_{os.path.basename(args.base_dir)}_{args.seed}"
        os.makedirs("./checkpoint", exist_ok=True)  # log and checkpoint directory
        with torch.no_grad():
            for i_batch, sampled_batch in enumerate(valloader):
                input, target = sampled_batch['image'], sampled_batch['label']
                input, target = input.cuda(), target.cuda()
                output = model(input)
                target = one_hot_encoder(args.num_class, target, args.base_dir)
                va_loss = fusion_bce_dice(output, target, args.num_class)
                iou, dsc, sen, pre, fos, spe, acc = iou_score(output, target, args.num_class)
                avg_meters['val_loss'].update(va_loss, input.size(0))
                avg_meters['val_iou'].update(np.mean(iou), input.size(0))
                avg_meters['val_dsc'].update(np.mean(dsc), input.size(0))
                avg_meters['val_sen'].update(np.mean(sen), input.size(0))
                avg_meters['val_pre'].update(np.mean(pre), input.size(0))
                avg_meters['val_fos'].update(np.mean(fos), input.size(0))
                avg_meters['val_spe'].update(np.mean(spe), input.size(0))
                avg_meters['val_acc'].update(np.mean(acc), input.size(0))
        print(
            'Epoch [%3d/%d] Train: L %.4f, Lf %.4f, Ls %.4f, Lu %.4f, IoU %.4f; Validation: L %.4f, IoU %.4f, '
            'DSC %.4f, SEN %.4f, PRE %.4f, FOS %.4f, SPE %.4f, ACC %.4f'
            % (epoch_num + 1, max_epoch, avg_meters['train_loss'].avg, avg_meters['fusion_loss'].avg,
               avg_meters['sensitivity_loss'].avg, avg_meters['unsupervised_loss'].avg, avg_meters['train_iou'].avg,
               avg_meters['val_loss'].avg, avg_meters['val_iou'].avg, avg_meters['val_dsc'].avg,
               avg_meters['val_sen'].avg, avg_meters['val_pre'].avg, avg_meters['val_fos'].avg,
               avg_meters['val_spe'].avg, avg_meters['val_acc'].avg),
            file=open(f"./checkpoint/{save_str}_log.txt", "a"))
        if avg_meters['val_iou'].avg > best_iou:
            torch.save(model.state_dict(), f'checkpoint/{save_str}_model.pth')
            torch.save(model, f'checkpoint/{save_str}_model.pkl')
            best_iou = avg_meters['val_iou'].avg
            print("=> Model saved", file=open(f"./checkpoint/{save_str}_log.txt", "a"))
    return "Training Finished!"
if __name__ == "__main__":
    train(args)