coverage (#38)
* refactor optimizer step
williamFalcon committed Jun 20, 2020
1 parent 9254626 commit 3593251
Showing 15 changed files with 96 additions and 74 deletions.
12 changes: 6 additions & 6 deletions .github/workflows/ci-testing.yml
@@ -80,12 +80,12 @@ jobs:
pip list
shell: bash

- name: Cache datasets
uses: actions/cache@v1
with:
path: tests/Datasets # This path is specific to Ubuntu
# Look to see if there is a cache hit for the corresponding requirements file
key: mnist-dataset
# - name: Cache datasets
# uses: actions/cache@v1
# with:
# path: tests/Datasets # This path is specific to Ubuntu
# # Look to see if there is a cache hit for the corresponding requirements file
# key: mnist-dataset

- name: Tests
# env:
21 changes: 11 additions & 10 deletions pl_bolts/losses/self_supervised_learning.py
@@ -92,6 +92,10 @@ def forward(self, Z):


class AmdimNCELoss(nn.Module):
def __init__(self, tclip):
super().__init__()
self.tclip = tclip

def forward(self, anchor_representations, positive_representations, mask_mat):
"""
Compute the NCE scores for predicting r_src->r_trg.
@@ -195,14 +199,13 @@ def __init__(self, strategy='1:1', tclip=10.):
self.strategy = strategy

self.masks = {}
self.nce_loss = AmdimNCELoss()
self.nce_loss = AmdimNCELoss(tclip)

def feat_size_w_mask(self, w):
masks_r5 = np.zeros((w, w, 1, w, w))
def feat_size_w_mask(self, w, feature_map):
masks_r5 = torch.zeros((w, w, 1, w, w), device=feature_map.device).type(torch.bool)
for i in range(w):
for j in range(w):
masks_r5[i, j, 0, i, j] = 1
masks_r5 = torch.tensor(masks_r5, device=w.device).type(torch.uint8)
masks_r5 = masks_r5.reshape(-1, 1, w, w)
return masks_r5

@@ -213,9 +216,8 @@ def _sample_src_ftr(self, r_cnv, masks):

if masks is not None:
# subsample from conv-ish r_cnv to get a single vector
mask_idx = torch.randint(0, masks.size(0), (n_batch,))
mask_idx = torch.randint(0, masks.size(0), (n_batch,), device=r_cnv.device)
mask = masks[mask_idx]
mask = mask.cuda(r_cnv.device.index)
r_cnv = torch.masked_select(r_cnv, mask)

# flatten features for use as globals in glb->lcl nce cost
@@ -245,8 +247,7 @@ def contrastive_task_77(self, x1_maps, x2_maps):

# make masking matrix to help compute nce costs
# (b x b) zero matrix with 1s in the diag
diag_mat = torch.eye(batch_size)
diag_mat = diag_mat.cuda(r1_src_x1.device.index)
diag_mat = torch.eye(batch_size, device=r1_src_x1.device)

# -----------------
# NCE COSTS
@@ -480,11 +481,11 @@ def forward(self, x1_maps, x2_maps):

# make mask
if h not in self.masks:
mask = self.feat_size_w_mask(h)
mask = self.feat_size_w_mask(h, m1)
self.masks[h] = mask

if self.strategy == '1:1':
return self.one_one_loss(x1_maps, x2_maps)
return self.contrastive_task_77(x1_maps, x2_maps)
elif self.strategy == '1:5,1:7,5:5':
return self.contrastive_task_11_55_77(x1_maps, x2_maps)
elif self.strategy == '1:1,5:5,7:7':
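The loss refactor above moves tclip into AmdimNCELoss.__init__ and builds the spatial masks as boolean tensors directly on the feature map's device, dropping the explicit .cuda() copies. A minimal standalone sketch of that mask construction, with a hypothetical 7x7 feature map standing in for the encoder output:

import torch

def feat_size_w_mask(w, feature_map):
    # one boolean mask per (i, j) spatial location, created on the feature map's device
    masks_r5 = torch.zeros((w, w, 1, w, w), device=feature_map.device, dtype=torch.bool)
    for i in range(w):
        for j in range(w):
            masks_r5[i, j, 0, i, j] = True
    return masks_r5.reshape(-1, 1, w, w)

feature_map = torch.randn(2, 320, 7, 7)                     # stand-in for a 7x7 encoder map
masks = feat_size_w_mask(7, feature_map)                     # (49, 1, 7, 7) boolean masks
mask_idx = torch.randint(0, masks.size(0), (2,), device=feature_map.device)
picked = torch.masked_select(feature_map, masks[mask_idx])   # one spatial position per sample, all channels

Because the indices and masks are created on the same device as the feature map, the sampling step no longer needs the removed mask.cuda(...) call.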
4 changes: 3 additions & 1 deletion pl_bolts/models/autoencoders/basic_ae/basic_ae_module.py
@@ -20,12 +20,13 @@ def __init__(
input_height=28,
batch_size=32,
learning_rate=0.001,
data_dir='',
**kwargs
):
super().__init__()
self.save_hyperparameters()

self.dataloaders = MNISTDataLoaders(save_path=os.getcwd())
self.dataloaders = MNISTDataLoaders(save_path=data_dir)

self.encoder = self.init_encoder(self.hparams.hidden_dim, self.hparams.latent_dim,
self.hparams.input_width, self.hparams.input_height)
@@ -122,6 +123,7 @@ def add_model_specific_args(parent_parser):
help='input image height - 28 for MNIST (must be even)')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--data_dir', type=str, default='')
return parser


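The autoencoder now takes a data_dir argument that flows into MNISTDataLoaders instead of defaulting to os.getcwd(); the same hook is added to the VAE and GAN templates below. A quick sketch of how it would be driven from the command line (the import path is assumed and may differ by release, and the flag values are illustrative):

from argparse import ArgumentParser
import pytorch_lightning as pl
from pl_bolts.models.autoencoders import AE   # assumed import path

parser = ArgumentParser()
parser = AE.add_model_specific_args(parser)
args = parser.parse_args(['--data_dir', '/tmp/mnist'])   # dataset lands here instead of the working directory

model = AE(**vars(args))
trainer = pl.Trainer(fast_dev_run=True)
trainer.fit(model)

Passing the directory explicitly is also what lets the tests further down hand in pytest's tmpdir so downloads stay isolated per test run.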
4 changes: 3 additions & 1 deletion pl_bolts/models/autoencoders/basic_vae/basic_vae_module.py
@@ -20,12 +20,13 @@ def __init__(
input_height=28,
batch_size=32,
learning_rate=0.001,
data_dir='',
**kwargs
):
super().__init__()
self.save_hyperparameters()

self.dataloaders = MNISTDataLoaders(save_path=os.getcwd())
self.dataloaders = MNISTDataLoaders(save_path=data_dir)

self.encoder = self.init_encoder(self.hparams.hidden_dim, self.hparams.latent_dim,
self.hparams.input_width, self.hparams.input_height)
@@ -179,6 +180,7 @@ def add_model_specific_args(parent_parser):
help='input image height - 28 for MNIST (must be even)')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--data_dir', type=str, default='')
return parser


4 changes: 3 additions & 1 deletion pl_bolts/models/gans/basic/basic_gan_module.py
@@ -21,6 +21,7 @@ def __init__(self,
b1=0.5,
b2=0.999,
learning_rate=0.0002,
data_dir='',
**kwargs):
super().__init__()

@@ -29,7 +30,7 @@ def __init__(self,

self.img_dim = (self.hparams.input_channels, self.hparams.input_width, self.hparams.input_height)

self.dataloaders = MNISTDataLoaders(save_path=os.getcwd())
self.dataloaders = MNISTDataLoaders(save_path=data_dir)

# networks
self.generator = self.init_generator(self.img_dim)
@@ -159,6 +160,7 @@ def add_model_specific_args(parent_parser):
parser.add_argument('--latent_dim', type=int, default=100,
help="generator embedding dim")
parser.add_argument('--batch_size', type=int, default=64, help="size of the batches")
parser.add_argument('--data_dir', type=str, default='')

return parser

18 changes: 9 additions & 9 deletions pl_bolts/models/mnist_module.py
@@ -84,7 +84,7 @@


class LitMNISTModel(LightningModule):
def __init__(self, hidden_dim=128, learning_rate=1e-3, batch_size=32, num_workers=4):
def __init__(self, hidden_dim=128, learning_rate=1e-3, batch_size=32, num_workers=4, data_dir=''):
super().__init__()
self.save_hyperparameters()

@@ -146,19 +146,18 @@ def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)

def prepare_data(self):
# download data once. better than putting in the dataloader methods
# will only download on GPU 0 with N gpus
train_dataset = MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor())
self.mnist_train, self.mnist_val = random_split(train_dataset, [55000, 5000])

MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())
MNIST(self.hparams.data_dir, train=True, download=True, transform=transforms.ToTensor())

def train_dataloader(self):
loader = DataLoader(self.mnist_train, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)
dataset = MNIST(self.hparams.data_dir, train=True, download=False, transform=transforms.ToTensor())
mnist_train, _ = random_split(dataset, [55000, 5000])
loader = DataLoader(mnist_train, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)
return loader

def val_dataloader(self):
loader = DataLoader(self.mnist_val, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)
dataset = MNIST(self.hparams.data_dir, train=True, download=False, transform=transforms.ToTensor())
_, mnist_val = random_split(dataset, [55000, 5000])
loader = DataLoader(mnist_val, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)
return loader

def test_dataloader(self):
@@ -172,6 +171,7 @@ def add_model_specific_args(parent_parser):
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--data_dir', type=str, default='')
parser.add_argument('--learning_rate', type=float, default=0.0001)
return parser

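In the MNIST template, prepare_data now only downloads the dataset, and each dataloader rebuilds the 55000/5000 split from data_dir. Since random_split is called separately in train_dataloader and val_dataloader, the two splits only line up if the RNG state matches; one common way to pin that down (not part of this commit, and assuming a torch version where random_split accepts a generator) is an explicit seed:

import torch
from torch.utils.data import random_split
from torchvision import transforms
from torchvision.datasets import MNIST

def split_mnist(data_dir, seed=42):
    # rebuild the dataset cheaply; download already happened in prepare_data
    dataset = MNIST(data_dir, train=True, download=False, transform=transforms.ToTensor())
    generator = torch.Generator().manual_seed(seed)
    return random_split(dataset, [55000, 5000], generator=generator)

mnist_train, _ = split_mnist('')   # used by train_dataloader
_, mnist_val = split_mnist('')     # same seed, so the same 55000/5000 partition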
41 changes: 26 additions & 15 deletions pl_bolts/models/self_supervised/amdim/amdim_module.py
@@ -14,21 +14,32 @@

class AMDIM(pl.LightningModule):

def __init__(self, hparams):
def __init__(self,
image_height=32,
ndf=320,
n_rkhs=1280,
n_depth=10,
use_bn=0,
tclip=20.0,
learning_rate=2e-4,
data_dir='',
nb_classes=10,
batch_size=200,
**kwargs
):
super().__init__()
self.save_hyperparameters()

self.hparams = hparams

dummy_batch = torch.zeros((2, 3, hparams.image_height, hparams.image_height))
dummy_batch = torch.zeros((2, 3, self.hparams.image_height, self.hparams.image_height))

self.encoder = AMDIMEncoder(
dummy_batch,
num_channels=3,
ndf=hparams.ndf,
n_rkhs=hparams.n_rkhs,
n_depth=hparams.n_depth,
encoder_size=hparams.image_height,
use_bn=hparams.use_bn
ndf=self.hparams.ndf,
n_rkhs=self.hparams.n_rkhs,
n_depth=self.hparams.n_depth,
encoder_size=self.hparams.image_height,
use_bn=self.hparams.use_bn
)
self.encoder.init_weights()

@@ -72,7 +83,7 @@ def training_step(self, batch, batch_nb):

return result

def training_epoch_end(self, outputs):
def training_step_end(self, outputs):
r1_x1 = outputs['r1_x1']
r5_x1 = outputs['r5_x1']
r7_x1 = outputs['r7_x1']
@@ -82,8 +93,8 @@ def training_epoch_end(self, outputs):

# ------------------
# NCE LOSS
loss_1t5, loss_1t7, loss_5t5, lgt_reg = self.nce_loss(r1_x1, r5_x1, r7_x1, r1_x2, r5_x2, r7_x2)
unsupervised_loss = loss_1t5 + loss_1t7 + loss_5t5 + lgt_reg
loss, lgt_reg = self.nce_loss((r1_x1, r5_x1, r7_x1), (r1_x2, r5_x2, r7_x2))
unsupervised_loss = loss + lgt_reg

# ------------------
# FULL LOSS
@@ -104,8 +115,8 @@ def validation_step(self, batch, batch_nb):
r1_x1, r5_x1, r7_x1, r1_x2, r5_x2, r7_x2 = self.forward(img_1, img_2)

# NCE LOSS
loss_1t5, loss_1t7, loss_5t5, lgt_reg = self.nce_loss(r1_x1, r5_x1, r7_x1, r1_x2, r5_x2, r7_x2)
unsupervised_loss = loss_1t5 + loss_1t7 + loss_5t5 + lgt_reg
loss, lgt_reg = self.nce_loss((r1_x1, r5_x1, r7_x1), (r1_x2, r5_x2, r7_x2))
unsupervised_loss = loss + lgt_reg

result = {
'val_nce': unsupervised_loss
@@ -315,6 +326,6 @@ def add_model_specific_args(parent_parser):

args = parser.parse_args()

model = AMDIM(args)
model = AMDIM(**vars(args))
trainer = pl.Trainer(fast_dev_run=True)
trainer.fit(model)
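The AMDIM refactor above replaces the single hparams Namespace with explicit keyword arguments plus save_hyperparameters(), which is what lets the CLI entry point call AMDIM(**vars(args)). A minimal sketch of that pattern on a toy module (the module and argument names here are illustrative, not part of pl_bolts):

from argparse import ArgumentParser
import pytorch_lightning as pl
import torch
from torch import nn

class TinyModule(pl.LightningModule):
    def __init__(self, hidden_dim=128, learning_rate=2e-4, **kwargs):
        super().__init__()
        self.save_hyperparameters()          # exposes self.hparams.hidden_dim, self.hparams.learning_rate
        self.layer = nn.Linear(28 * 28, self.hparams.hidden_dim)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)

parser = ArgumentParser()
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--learning_rate', type=float, default=2e-4)
args = parser.parse_args([])

model = TinyModule(**vars(args))             # same call style as AMDIM(**vars(args))

The **kwargs catch-all in the constructor is what keeps extra argparse flags (trainer options, unused model flags) from breaking instantiation.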
2 changes: 1 addition & 1 deletion pl_bolts/models/self_supervised/amdim/networks.py
@@ -271,7 +271,7 @@ def __init__(self, n_input, n_output, use_bn=False):
stride=1, padding=0, bias=True)
# when possible, initialize shortcut to be like identity
if n_output >= n_input:
eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=np.uint8)
eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=np.bool)
for i in range(n_input):
eye_mask[i, i, 0, 0] = 1
self.shortcut.weight.data.uniform_(-0.01, 0.01)
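The eye_mask change above switches the identity-style shortcut mask from uint8 to a boolean dtype. A standalone sketch of the same initialization; note that np.bool is only an alias for the builtin bool and is removed in newer NumPy releases, so plain bool is used here (that spelling is not part of the commit):

import numpy as np
import torch
from torch import nn

n_input, n_output = 64, 128
shortcut = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1, padding=0, bias=True)

# boolean mask picking the diagonal of the (n_output, n_input, 1, 1) weight tensor
eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=bool)
for i in range(n_input):
    eye_mask[i, i, 0, 0] = True

shortcut.weight.data.uniform_(-0.01, 0.01)
shortcut.weight.data[torch.from_numpy(eye_mask)] = 1.0   # first n_input output channels start near identity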
6 changes: 3 additions & 3 deletions pl_bolts/models/self_supervised/cpc/cpc_module.py
@@ -235,22 +235,22 @@ def prepare_data(self):
def train_dataloader(self):
loader = None
if self.hparams.dataset == 'cifar10':
train_transform = cpc_transforms.CPCTransformsCIFAR10().train_transform
train_transform = cpc_transforms.CPCTransformsCIFAR10()

elif self.hparams.dataset == 'stl10':
stl10_transform = cpc_transforms.CPCTransformsSTL10Patches(
patch_size=self.hparams.patch_size,
overlap=self.hparams.patch_overlap
)
train_transform = stl10_transform.train_transform
train_transform = stl10_transform
loader = self.dataset.train_dataloader_mixed(self.hparams.batch_size, transforms=train_transform)

if self.hparams.dataset == 'imagenet128':
train_transform = cpc_transforms.CPCTransformsImageNet128Patches(
self.hparams.patch_size,
overlap=self.hparams.patch_overlap
)
train_transform = train_transform.train_transform
train_transform = train_transform

if loader is None:
loader = self.dataset.train_dataloader(self.hparams.batch_size, transforms=train_transform)
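Here the CPC dataloaders are handed the transform objects directly instead of their .train_transform attribute, which suggests the transform classes are used as callables. A minimal sketch of that pattern with a made-up patch transform (PatchifyTransform is illustrative, not a pl_bolts class):

from torchvision import transforms

class PatchifyTransform:
    def __init__(self, patch_size=8, overlap=4):
        self.patch_size = patch_size
        self.overlap = overlap
        self.to_tensor = transforms.ToTensor()

    def __call__(self, img):
        # the object itself is the transform, so it can be passed straight to a dataloader
        x = self.to_tensor(img)                                      # (C, H, W)
        step = self.patch_size - self.overlap
        patches = x.unfold(1, self.patch_size, step).unfold(2, self.patch_size, step)
        return patches.contiguous()                                  # (C, nH, nW, patch, patch)

train_transform = PatchifyTransform(patch_size=8, overlap=4)         # passed directly, no .train_transform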
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,4 +1,4 @@
pytorch-lightning>=0.8.0
pytorch-lightning>=0.8.1
torchvision>=0.5
scikit-learn>=0.23
opencv-python
22 changes: 0 additions & 22 deletions tests/models/self_supervised.py

This file was deleted.

4 changes: 2 additions & 2 deletions tests/models/test_autoencoders.py
@@ -7,7 +7,7 @@
def test_vae(tmpdir):
reset_seed()

model = VAE()
model = VAE(data_dir=tmpdir)
trainer = pl.Trainer(fast_dev_run=True, default_root_dir=tmpdir)
trainer.fit(model)
trainer.test(model)
@@ -19,7 +19,7 @@ def test_vae(tmpdir):
def test_ae(tmpdir):
reset_seed()

model = AE()
model = AE(data_dir=tmpdir)
trainer = pl.Trainer(fast_dev_run=True, default_root_dir=tmpdir)
trainer.fit(model)
trainer.test(model)
2 changes: 1 addition & 1 deletion tests/models/test_gans.py
@@ -7,7 +7,7 @@
def test_gan(tmpdir):
reset_seed()

model = BasicGAN()
model = BasicGAN(data_dir=tmpdir)
trainer = pl.Trainer(fast_dev_run=True, default_root_dir=tmpdir)
trainer.fit(model)
trainer.test(model)
2 changes: 1 addition & 1 deletion tests/models/test_mnist_templates.py
@@ -8,7 +8,7 @@
def test_mnist(tmpdir):
reset_seed()

model = LitMNISTModel()
model = LitMNISTModel(data_dir=tmpdir)
trainer = pl.Trainer(train_percent_check=0.01, val_percent_check=0.01, max_epochs=1,
test_percent_check=0.01, default_root_dir=tmpdir)
trainer.fit(model)
