/
ssl_dataset.py
336 lines (294 loc) · 13.7 KB
/
ssl_dataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
import torch
from .data_utils import split_ssl_data, sample_labeled_data
from .dataset import BasicDataset
from collections import Counter
import torchvision
import numpy as np
from torchvision import transforms
import json
import os
import random
from .augmentation.randaugment import RandAugment
from torch.utils.data import sampler, DataLoader
from torch.utils.data.sampler import BatchSampler
import torch.distributed as dist
from datasets.DistributedProxySampler import DistributedProxySampler
import gc
import sys
import copy
from PIL import Image
# Per-channel RGB normalization statistics for every supported dataset.
# CIFAR/STL-10 values are published on a 0-255 scale and rescaled to [0, 1];
# SVHN and ImageNet values are already in [0, 1].
mean = {
    'cifar10': [v / 255 for v in [125.3, 123.0, 113.9]],
    'cifar100': [v / 255 for v in [129.3, 124.1, 112.4]],
    'svhn': [0.4380, 0.4440, 0.4730],
    'stl10': [v / 255 for v in [112.4, 109.1, 98.6]],
    'imagenet': [0.485, 0.456, 0.406],
}
std = {
    'cifar10': [v / 255 for v in [63.0, 62.1, 66.7]],
    'cifar100': [v / 255 for v in [68.2, 65.4, 70.4]],
    'svhn': [0.1751, 0.1771, 0.1744],
    'stl10': [v / 255 for v in [68.4, 66.6, 68.5]],
    'imagenet': [0.229, 0.224, 0.225],
}
def accimage_loader(path):
    """Load the image at ``path`` with accimage, falling back to PIL.

    accimage raises IOError on files it cannot decode; PIL is more
    tolerant, so decoding failures are retried through ``pil_loader``.
    """
    import accimage
    try:
        img = accimage.Image(path)
    except IOError:
        # Potentially a decoding problem, fall back to PIL.Image
        img = pil_loader(path)
    return img
def pil_loader(path):
    """Open ``path`` with PIL and return it converted to RGB.

    The file handle is opened explicitly (instead of passing the path to
    ``Image.open``) so it is closed deterministically, avoiding a
    ResourceWarning (https://github.com/python-pillow/Pillow/issues/835).
    """
    with open(path, 'rb') as handle:
        image = Image.open(handle)
        return image.convert('RGB')
def default_loader(path):
    """Dispatch to the loader matching torchvision's configured image backend."""
    from torchvision import get_image_backend
    if get_image_backend() != 'accimage':
        return pil_loader(path)
    return accimage_loader(path)
class ImagenetDataset(torchvision.datasets.ImageFolder):
    """ImageFolder variant for SSL training on ImageNet.

    Labeled mode (``ulb=False``): ``__getitem__`` returns
    ``(index, weak_augmented_img, target)``.
    Unlabeled mode (``ulb=True``): returns
    ``(index, weak_augmented_img, strong_augmented_img)`` — no target —
    where the strong branch prepends RandAugment(3, 5) to the base transform.
    """

    def __init__(self, root, transform, ulb, num_labels=-1):
        # NOTE(review): super().__init__ already scans `root` through the
        # parent DatasetFolder machinery; the directory is then scanned a
        # second time below so the per-class label budget (num_labels) can
        # be applied. This relies on the torchvision version this repo
        # pins calling make_dataset compatibly — TODO confirm.
        super().__init__(root, transform)
        self.ulb = ulb                  # True -> unlabeled (weak+strong) mode
        self.num_labels = num_labels    # per-class file cap; -1 means "keep all"
        is_valid_file = None
        # File extensions accepted as images when is_valid_file is None.
        extensions = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
        classes, class_to_idx = self._find_classes(self.root)
        samples = self.make_dataset(self.root, class_to_idx, extensions, is_valid_file)
        if len(samples) == 0:
            msg = "Found 0 files in subfolders of: {}\n".format(self.root)
            if extensions is not None:
                msg += "Supported extensions are: {}".format(",".join(extensions))
            raise RuntimeError(msg)
        self.loader = default_loader
        self.extensions = extensions
        self.classes = classes
        self.class_to_idx = class_to_idx
        # Overwrite the parent's sample list with the budget-limited rescan.
        self.samples = samples
        self.targets = [s[1] for s in samples]
        if self.ulb:
            # Strong augmentation = RandAugment(3, 5) followed by the same
            # weak pipeline; deep copy keeps the weak transform untouched.
            self.strong_transform = copy.deepcopy(transform)
            self.strong_transform.transforms.insert(0, RandAugment(3, 5))

    def __getitem__(self, index):
        """Return (index, weak, target) if labeled, else (index, weak, strong)."""
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample_transformed = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        # NOTE(review): if self.transform is None, `sample_transformed` is
        # unbound here and this line raises UnboundLocalError; callers in
        # this file always pass a transform.
        return (index, sample_transformed, target) if not self.ulb else (
            index, sample_transformed, self.strong_transform(sample))

    def make_dataset(
            self,
            directory,
            class_to_idx,
            extensions=None,
            is_valid_file=None,
    ):
        """Scan ``directory`` and build a list of (path, class_index) samples.

        When ``self.num_labels != -1``, at most ``num_labels`` files per class
        are kept (chosen via ``random.shuffle``) and the chosen filenames are
        dumped to ``./sampled_label_idx.json`` as a side effect.

        Raises:
            ValueError: if exactly one of ``extensions``/``is_valid_file``
                is not supplied (they are mutually exclusive).
        """
        instances = []
        directory = os.path.expanduser(directory)
        both_none = extensions is None and is_valid_file is None
        both_something = extensions is not None and is_valid_file is not None
        if both_none or both_something:
            raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
        if extensions is not None:
            # Default validity check: case-insensitive extension match.
            def is_valid_file(x: str) -> bool:
                return x.lower().endswith(extensions)
        lb_idx = {}
        for target_class in sorted(class_to_idx.keys()):
            class_index = class_to_idx[target_class]
            target_dir = os.path.join(directory, target_class)
            if not os.path.isdir(target_dir):
                continue
            for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
                # Shuffle so the per-class label subset is a random sample.
                random.shuffle(fnames)
                # NOTE(review): truncation happens BEFORE the is_valid_file
                # filter, so non-image files count against the per-class
                # budget and fewer than num_labels valid samples may be kept.
                if self.num_labels != -1:
                    fnames = fnames[:self.num_labels]
                if self.num_labels != -1:
                    lb_idx[target_class] = fnames
                for fname in fnames:
                    path = os.path.join(root, fname)
                    if is_valid_file(path):
                        item = path, class_index
                        instances.append(item)
        if self.num_labels != -1:
            # Persist which filenames were sampled as the labeled subset.
            with open('./sampled_label_idx.json', 'w') as f:
                json.dump(lb_idx, f)
        del lb_idx
        gc.collect()
        return instances
class ImageNetLoader:
    """Factory for labeled / unlabeled / eval ImagenetDataset instances.

    Expects the directory layout ``<root_path>/imagenet/{train,val}/<class>/...``.
    ``num_labels`` is the TOTAL label budget; it is divided by ``num_class``
    to obtain the per-class cap passed to ImagenetDataset.
    """

    def __init__(self, root_path, num_labels=-1, num_class=1000):
        self.root_path = os.path.join(root_path, 'imagenet')
        # Per-class label budget (floor division of the total budget).
        self.num_labels = num_labels // num_class

    def get_transform(self, train, ulb):
        """Return the weak-augmentation pipeline (``ulb`` is accepted but unused)."""
        if not train:
            return transforms.Compose([
                transforms.Resize([224, 224]),
                transforms.ToTensor(),
                transforms.Normalize(mean["imagenet"], std["imagenet"]),
            ])
        return transforms.Compose([
            transforms.Resize([256, 256]),
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(224, padding=4, padding_mode='reflect'),
            transforms.ToTensor(),
            transforms.Normalize(mean["imagenet"], std["imagenet"]),
        ])

    def get_lb_train_data(self):
        """Labeled training set, capped at ``self.num_labels`` files per class."""
        return ImagenetDataset(root=os.path.join(self.root_path, "train"),
                               transform=self.get_transform(train=True, ulb=False),
                               ulb=False,
                               num_labels=self.num_labels)

    def get_ulb_train_data(self):
        """Unlabeled training set over ALL train files (weak + strong pairs)."""
        return ImagenetDataset(root=os.path.join(self.root_path, "train"),
                               transform=self.get_transform(train=True, ulb=True),
                               ulb=True)

    def get_lb_test_data(self):
        """Evaluation set built from the ``val`` split with eval transforms."""
        return ImagenetDataset(root=os.path.join(self.root_path, "val"),
                               transform=self.get_transform(train=False, ulb=False),
                               ulb=False)
def get_transform(mean, std, crop_size, train=True):
    """Build the preprocessing pipeline for the small-image datasets.

    Training adds random horizontal flip and a reflect-padded random crop
    of ``crop_size``; both modes end with ToTensor + Normalize(mean, std).
    """
    steps = []
    if train:
        steps.append(transforms.RandomHorizontalFlip())
        steps.append(transforms.RandomCrop(crop_size, padding=4, padding_mode='reflect'))
    steps.append(transforms.ToTensor())
    steps.append(transforms.Normalize(mean, std))
    return transforms.Compose(steps)
class SSL_Dataset:
    """
    SSL_Dataset class gets dataset from torchvision.datasets,
    separates labeled and unlabeled data,
    and return BasicDataset: torch.utils.data.Dataset (see datasets.dataset.py)
    """

    def __init__(self,
                 args,
                 alg='fixmatch',
                 name='cifar10',
                 train=True,
                 num_classes=10,
                 data_dir='./data'):
        """
        Args
            args: parsed experiment arguments, forwarded to the data-split helpers
            alg: SSL algorithms
            name: name of dataset in torchvision.datasets (cifar10, cifar100, svhn, stl10)
            train: True means the dataset is training dataset (default=True)
            num_classes: number of label classes
            data_dir: path of directory, where data is downloaded or stored.
        """
        self.args = args
        self.alg = alg
        self.name = name
        self.train = train
        self.num_classes = num_classes
        self.data_dir = data_dir
        # Crop size follows each dataset's native resolution:
        # STL-10 is 96x96, ImageNet crops to 224, CIFAR/SVHN are 32x32.
        crop_size = 96 if self.name.upper() == 'STL10' else 224 if self.name.upper() == 'IMAGENET' else 32
        self.transform = get_transform(mean[name], std[name], crop_size, train)

    def get_data(self, svhn_extra=True):
        """
        get_data returns data (images) and targets (labels)
        shape of data: B, H, W, C
        shape of labels: B,

        svhn_extra: SVHN only — if True, the 'extra' split is concatenated
            onto the 'train' split.
        Downloads the dataset into self.data_dir on first use (download=True).
        STL-10 additionally returns the unlabeled split's images as a third value.
        NOTE(review): returns None implicitly for any dataset name other
        than CIFAR*, SVHN, STL10.
        """
        dset = getattr(torchvision.datasets, self.name.upper())
        if 'CIFAR' in self.name.upper():
            dset = dset(self.data_dir, train=self.train, download=True)
            data, targets = dset.data, dset.targets
            return data, targets
        elif self.name.upper() == 'SVHN':
            if self.train:
                if svhn_extra:  # train+extra
                    dset_base = dset(self.data_dir, split='train', download=True)
                    # SVHN stores arrays as (B, C, H, W); transpose to (B, H, W, C).
                    data_b, targets_b = dset_base.data.transpose([0, 2, 3, 1]), dset_base.labels
                    dset_extra = dset(self.data_dir, split='extra', download=True)
                    data_e, targets_e = dset_extra.data.transpose([0, 2, 3, 1]), dset_extra.labels
                    data = np.concatenate([data_b, data_e])
                    targets = np.concatenate([targets_b, targets_e])
                    # Drop the per-split arrays; only the concatenation is returned.
                    del data_b, data_e
                    del targets_b, targets_e
                else:  # train_only
                    dset = dset(self.data_dir, split='train', download=True)
                    data, targets = dset.data.transpose([0, 2, 3, 1]), dset.labels
            else:  # test
                dset = dset(self.data_dir, split='test', download=True)
                data, targets = dset.data.transpose([0, 2, 3, 1]), dset.labels
            return data, targets
        elif self.name.upper() == 'STL10':
            split = 'train' if self.train else 'test'
            dset_lb = dset(self.data_dir, split=split, download=True)
            dset_ulb = dset(self.data_dir, split='unlabeled', download=True)
            data, targets = dset_lb.data.transpose([0, 2, 3, 1]), dset_lb.labels.astype(np.int64)
            ulb_data = dset_ulb.data.transpose([0, 2, 3, 1])
            return data, targets, ulb_data

    def get_dset(self, is_ulb=False,
                 strong_transform=None, onehot=False):
        """
        get_dset returns class BasicDataset, containing the returns of get_data.

        Args
            is_ulb: If True, returned dataset generates a pair of weak and strong augmented images.
            strong_transform: list of strong_transform (augmentation) if use_strong_transform is True.
            onehot: If True, the label is not integer, but one-hot vector.
        """
        if self.name.upper() == 'STL10':
            # STL-10's unlabeled split is discarded here; get_ssl_dset uses it.
            data, targets, _ = self.get_data()
        else:
            data, targets = self.get_data()
        num_classes = self.num_classes
        transform = self.transform
        return BasicDataset(self.alg, data, targets, num_classes, transform,
                            is_ulb, strong_transform, onehot)

    def get_ssl_dset(self, num_labels, index=None, include_lb_to_ulb=True,
                     strong_transform=None, onehot=False):
        """
        get_ssl_dset split training samples into labeled and unlabeled samples.
        The labeled data is balanced samples over classes.

        Args:
            num_labels: number of labeled data.
            index: If index of np.array is given, labeled data is not randomly sampled, but use index for sampling.
            include_lb_to_ulb: If True, consistency regularization is also computed for the labeled data.
            strong_transform: list of strong transform (RandAugment in FixMatch)
            onehot: If True, the target is converted into onehot vector.

        Returns:
            BasicDataset (for labeled data), BasicDataset (for unlabeled data)

        Side effect: writes the labeled-class distribution to
        ./data_statistics/<name>_<num_labels>.json (used by remixmatch).
        """
        # Supervised top line using all data as labeled data.
        if self.alg == 'fullysupervised':
            lb_data, lb_targets = self.get_data()
            lb_dset = BasicDataset(self.alg, lb_data, lb_targets, self.num_classes,
                                   self.transform, False, None, onehot)
            # No unlabeled dataset in the fully-supervised baseline.
            return lb_dset, None
        if self.name.upper() == 'STL10':
            # STL-10 ships a dedicated unlabeled split whose labels are
            # unknown, so ulb_targets stays None.
            lb_data, lb_targets, ulb_data = self.get_data()
            if include_lb_to_ulb:
                ulb_data = np.concatenate([ulb_data, lb_data], axis=0)
            lb_data, lb_targets, _ = sample_labeled_data(self.args, lb_data, lb_targets, num_labels, self.num_classes)
            ulb_targets = None
        else:
            data, targets = self.get_data()
            lb_data, lb_targets, ulb_data, ulb_targets = split_ssl_data(self.args, data, targets,
                                                                        num_labels, self.num_classes,
                                                                        index, include_lb_to_ulb)
        # output the distribution of labeled data for remixmatch
        count = [0 for _ in range(self.num_classes)]
        for c in lb_targets:
            count[c] += 1
        # NOTE(review): local name `dist` shadows the module-level
        # `torch.distributed as dist` import within this method.
        dist = np.array(count, dtype=float)
        dist = dist / dist.sum()
        dist = dist.tolist()
        out = {"distribution": dist}
        output_file = r"./data_statistics/"
        output_path = output_file + str(self.name) + '_' + str(num_labels) + '.json'
        if not os.path.exists(output_file):
            os.makedirs(output_file, exist_ok=True)
        with open(output_path, 'w') as w:
            json.dump(out, w)
        # print(Counter(ulb_targets.tolist()))
        lb_dset = BasicDataset(self.alg, lb_data, lb_targets, self.num_classes,
                               self.transform, False, None, onehot)
        # Unlabeled dataset pairs weak + strong augmentations (is_ulb=True).
        ulb_dset = BasicDataset(self.alg, ulb_data, ulb_targets, self.num_classes,
                                self.transform, True, strong_transform, onehot)
        # print(lb_data.shape)
        # print(ulb_data.shape)
        return lb_dset, ulb_dset