-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtransformations.py
More file actions
29 lines (25 loc) · 985 Bytes
/
transformations.py
File metadata and controls
29 lines (25 loc) · 985 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
import torch
from torchvision import transforms
# Weak augmentation pipeline: horizontal flip + padded 32x32 crop, then
# tensor conversion and per-channel normalization to roughly [-1, 1].
weak_transform = transforms.Compose(
    [
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, padding=4),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ]
)
# Strong augmentation pipeline: same geometric transforms as the weak one,
# plus color jitter for a harder, appearance-level perturbation.
strong_transform = transforms.Compose(
    [
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, padding=4),
        transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ]
)
def apply_transform_to_batches(data, transform):
    """Apply a torchvision transform independently to every image in a batch.

    Args:
        data: Batched image tensor; iterating it yields one image tensor per
            batch element (assumes (N, C, H, W) layout — TODO confirm with
            callers).
        transform: Callable mapping a PIL image to a tensor, e.g.
            ``weak_transform`` or ``strong_transform``.

    Returns:
        The transformed images stacked into a single tensor along dim 0.

    Raises:
        RuntimeError: If ``data`` is empty (``torch.stack`` on an empty list).
    """
    # Hoisted out of the loop: one converter instance serves the whole batch
    # (the original rebuilt ToPILImage() for every image).
    to_pil = transforms.ToPILImage()
    # torchvision transform pipelines here expect PIL input, so convert each
    # image tensor before applying the transform.
    return torch.stack([transform(to_pil(image)) for image in data])