From 00b1e34c536d1cd312f561bc8bceea3e78e74071 Mon Sep 17 00:00:00 2001
From: Dmitry Ulyanov
Date: Mon, 13 Mar 2017 14:03:13 +0300
Subject: [PATCH 1/3] fix seeding

---
 dcgan/main.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/dcgan/main.py b/dcgan/main.py
index 7bd8d29893..04403e7de5 100644
--- a/dcgan/main.py
+++ b/dcgan/main.py
@@ -31,6 +31,7 @@
 parser.add_argument('--netG', default='', help="path to netG (to continue training)")
 parser.add_argument('--netD', default='', help="path to netD (to continue training)")
 parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
+parser.add_argument('--manualSeed', type=int, help='manual seed')
 
 opt = parser.parse_args()
 print(opt)
@@ -39,10 +40,14 @@
     os.makedirs(opt.outf)
 except OSError:
     pass
-opt.manualSeed = random.randint(1, 10000) # fix seed
+
+if opt.manualSeed is None:
+    opt.manualSeed = random.randint(1, 10000)
 print("Random Seed: ", opt.manualSeed)
 random.seed(opt.manualSeed)
 torch.manual_seed(opt.manualSeed)
+if opt.cuda:
+    torch.cuda.manual_seed_all(opt.manualSeed)
 
 cudnn.benchmark = True
 

From a4130911e9f56881156c77ad065b5e1dca8deac2 Mon Sep 17 00:00:00 2001
From: Dmitry Ulyanov
Date: Mon, 13 Mar 2017 14:40:45 +0300
Subject: [PATCH 2/3] fix typo

---
 dcgan/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dcgan/README.md b/dcgan/README.md
index edf0e2b855..2d28829cf1 100644
--- a/dcgan/README.md
+++ b/dcgan/README.md
@@ -12,7 +12,7 @@ After every epoch, models are saved to: `netG_epoch_%d.pth` and `netD_epoch_%d.p
 ##Downloading the dataset
 You can download the LSUN dataset by cloning [this repo](https://github.com/fyu/lsun) and running
 ```
-python donwload.py -c bedroom
+python download.py -c bedroom
 ```
 
 ##Usage

From 94bae6b5a8af373346744191efed3cf7e1abb59f Mon Sep 17 00:00:00 2001
From: Dmitry Ulyanov
Date: Mon, 13 Mar 2017 14:44:03 +0300
Subject: [PATCH 3/3] fixing some codestyle issues found by flake8

---
 dcgan/main.py | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/dcgan/main.py b/dcgan/main.py
index 04403e7de5..5da679d324 100644
--- a/dcgan/main.py
+++ b/dcgan/main.py
@@ -26,8 +26,8 @@
 parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
 parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
 parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
-parser.add_argument('--cuda' , action='store_true', help='enables cuda')
-parser.add_argument('--ngpu' , type=int, default=1, help='number of GPUs to use')
+parser.add_argument('--cuda', action='store_true', help='enables cuda')
+parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
 parser.add_argument('--netG', default='', help="path to netG (to continue training)")
 parser.add_argument('--netD', default='', help="path to netD (to continue training)")
 parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
@@ -89,6 +89,7 @@
 ndf = int(opt.ndf)
 nc = 3
 
+
 # custom weights initialization called on netG and netD
 def weights_init(m):
     classname = m.__class__.__name__
@@ -98,6 +99,7 @@ def weights_init(m):
         m.weight.data.normal_(1.0, 0.02)
         m.bias.data.fill_(0)
 
+
 class _netG(nn.Module):
     def __init__(self, ngpu):
         super(_netG, self).__init__()
@@ -124,18 +126,21 @@ def __init__(self, ngpu):
             nn.Tanh()
             # state size. (nc) x 64 x 64
         )
+
     def forward(self, input):
         gpu_ids = None
         if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
             gpu_ids = range(self.ngpu)
         return nn.parallel.data_parallel(self.main, input, gpu_ids)
 
+
 netG = _netG(ngpu)
 netG.apply(weights_init)
 if opt.netG != '':
     netG.load_state_dict(torch.load(opt.netG))
 print(netG)
 
+
 class _netD(nn.Module):
     def __init__(self, ngpu):
         super(_netD, self).__init__()
@@ -160,6 +165,7 @@ def __init__(self, ngpu):
             nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
             nn.Sigmoid()
         )
+
     def forward(self, input):
         gpu_ids = None
         if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
@@ -167,6 +173,7 @@ def forward(self, input):
         output = nn.parallel.data_parallel(self.main, input, gpu_ids)
         return output.view(-1, 1)
 
+
 netD = _netD(ngpu)
 netD.apply(weights_init)
 if opt.netD != '':
@@ -195,8 +202,8 @@ def forward(self, input):
 fixed_noise = Variable(fixed_noise)
 
 # setup optimizer
-optimizerD = optim.Adam(netD.parameters(), lr = opt.lr, betas = (opt.beta1, 0.999))
-optimizerG = optim.Adam(netG.parameters(), lr = opt.lr, betas = (opt.beta1, 0.999))
+optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
+optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
 
 for epoch in range(opt.niter):
     for i, data in enumerate(dataloader, 0):
@@ -231,7 +238,7 @@ def forward(self, input):
         # (2) Update G network: maximize log(D(G(z)))
         ###########################
         netG.zero_grad()
-        label.data.fill_(real_label) # fake labels are real for generator cost
+        label.data.fill_(real_label)  # fake labels are real for generator cost
         output = netD(fake)
         errG = criterion(output, label)
         errG.backward()
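
The seeding logic introduced in PATCH 1/3 can be exercised on its own. Below is a minimal standalone sketch, not part of the patch; the `seed_everything` helper is just an illustrative name, and the snippet only uses the same calls the patch adds (`random.seed`, `torch.manual_seed`, `torch.cuda.manual_seed_all`) to show why seeding both Python's and torch's RNGs, plus every CUDA device when `--cuda` is set, makes runs repeatable.

import random
import torch

def seed_everything(seed, cuda=False):
    # Same calls the patch adds to dcgan/main.py: seed Python's RNG,
    # torch's CPU RNG and, when CUDA is used, all GPU RNGs.
    random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)

# Two runs seeded identically draw identical "noise" batches, which is
# what makes a fixed --manualSeed useful for reproducing results.
seed_everything(42)
a = torch.randn(4, 100, 1, 1)
seed_everything(42)
b = torch.randn(4, 100, 1, 1)
assert torch.equal(a, b)

With the patch applied, running main.py with `--manualSeed 42` (plus the usual dataset flags) should reproduce the same weight initialization and fixed noise across runs, while omitting the flag keeps the old behaviour of drawing a fresh seed and printing it for later reuse.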