/
DCGAN_nets.py
101 lines (88 loc) · 3.7 KB
/
DCGAN_nets.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
class DCGANGenerator_cifar10(nn.Module):
    """DCGAN generator for 32x32 CIFAR-10 images.

    Maps a flat latent vector of size ``z_dim`` to an ``(output_nc, 32, 32)``
    image: one 1x1 -> 4x4 transposed conv, then three stride-2 upsampling
    transposed convs (4 -> 8 -> 16 -> 32), each followed by norm + ReLU,
    with a final Tanh squashing outputs to [-1, 1].
    """

    def __init__(self, z_dim, ngf=64, output_nc=3, norm_layer=nn.BatchNorm2d):
        """
        Args:
            z_dim (int): dimensionality of the input latent vector.
            ngf (int): base number of generator feature maps.
            output_nc (int): number of output image channels.
            norm_layer: normalization layer class, or a functools.partial
                wrapping one.
        """
        super(DCGANGenerator_cifar10, self).__init__()
        self.z_dim = z_dim
        self.ngf = ngf
        # BatchNorm2d has affine parameters, so a conv bias immediately before
        # it is redundant; only use a bias for other norm layers.
        # Fix: a stray `use_bias = True` previously clobbered this computation,
        # making the conditional dead code and contradicting its own comment.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func != nn.BatchNorm2d
        else:
            use_bias = norm_layer != nn.BatchNorm2d
        seq = [nn.ConvTranspose2d(z_dim, ngf*4, 4, stride=1, padding=0, bias=use_bias),
               norm_layer(ngf*4),
               nn.ReLU(),
               nn.ConvTranspose2d(ngf*4, ngf*2, 4, stride=2, padding=(1, 1), bias=use_bias),
               norm_layer(ngf*2),
               nn.ReLU(),
               nn.ConvTranspose2d(ngf*2, ngf, 4, stride=2, padding=(1, 1), bias=use_bias),
               norm_layer(ngf),
               nn.ReLU(),
               # Final layer has no norm after it, so it keeps its bias
               # (nn.ConvTranspose2d default bias=True).
               nn.ConvTranspose2d(ngf, output_nc, 4, stride=2, padding=(1, 1)),
               nn.Tanh()]
        self.model = nn.Sequential(*seq)

    def forward(self, input):
        """Generate images from a batch of latent vectors.

        Args:
            input: tensor reshapeable to (N, z_dim); reshaped to
                (N, z_dim, 1, 1) before the conv stack.

        Returns:
            Tensor of shape (N, output_nc, 32, 32) in [-1, 1].
        """
        return self.model(input.view(-1, self.z_dim, 1, 1))
class DCGANDiscriminator_cifar10(nn.Module):
    """DCGAN discriminator for 32x32 CIFAR-10 images.

    Three stride-2 convs downsample (32 -> 16 -> 8 -> 4) with LeakyReLU(0.2)
    activations, then a single linear layer maps the flattened 4x4 feature
    map to one raw logit per image (no sigmoid — pair with a logit loss).
    """

    def __init__(self, ndf=64, input_nc=3, norm_layer=nn.BatchNorm2d):
        """
        Args:
            ndf (int): base number of discriminator feature maps.
            input_nc (int): number of input image channels.
            norm_layer: normalization layer class, or a functools.partial
                wrapping one.
        """
        super(DCGANDiscriminator_cifar10, self).__init__()
        self.ndf = ndf
        # BatchNorm2d has affine parameters, so a conv bias immediately before
        # it is redundant; only use a bias for other norm layers.
        # Fix: a stray `use_bias = True` previously clobbered this computation,
        # making the conditional dead code and contradicting its own comment.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func != nn.BatchNorm2d
        else:
            use_bias = norm_layer != nn.BatchNorm2d
        # First conv is NOT followed by a norm layer (standard DCGAN omits it),
        # so it always needs its bias regardless of norm_layer.
        seq = [nn.Conv2d(input_nc, ndf, 4, stride=2, padding=(1, 1), bias=True),
               nn.LeakyReLU(0.2),
               nn.Conv2d(ndf, ndf*2, 4, stride=2, padding=(1, 1), bias=use_bias),
               norm_layer(ndf*2),
               nn.LeakyReLU(0.2),
               nn.Conv2d(ndf*2, ndf*4, 4, stride=2, padding=(1, 1), bias=use_bias),
               norm_layer(ndf*4),
               nn.LeakyReLU(0.2)]
        self.cnn_model = nn.Sequential(*seq)
        # 4x4 spatial map with ndf*4 channels after three halvings of 32x32.
        fc = [nn.Linear(4*4*ndf*4, 1)]
        self.fc = nn.Sequential(*fc)

    def forward(self, input):
        """Score a batch of images.

        Args:
            input: tensor of shape (N, input_nc, 32, 32).

        Returns:
            Tensor of shape (N, 1) of raw (unnormalized) logits.
        """
        x = self.cnn_model(input)
        x = x.view(-1, 4*4*self.ndf*4)
        x = self.fc(x)
        return x
#class DCGANDiscriminator_cifar10(nn.Module):
# def __init__(self, ndf=64, input_nc=3, norm_layer=nn.BatchNorm2d):
# super(DCGANDiscriminator_cifar10, self).__init__()
#
# self.ndf = ndf
# if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
# use_bias = norm_layer.func != nn.BatchNorm2d
# else:
# use_bias = norm_layer != nn.BatchNorm2d
#
# seq = [nn.Conv2d(input_nc, ndf, 3, stride=1, padding=(1,1), bias=use_bias),
# nn.LeakyReLU(0.2),
# nn.Conv2d(ndf, ndf*2, 4, stride=2, padding=(1,1), bias=use_bias),
# norm_layer(ndf*2),
# nn.LeakyReLU(0.2),
# nn.Conv2d(ndf*2, ndf*4, 4, stride=2, padding=(1,1), bias=use_bias),
# norm_layer(ndf*4),
# nn.LeakyReLU(0.2),
# nn.Conv2d(ndf*4, ndf*8, 4, stride=2, padding=(1,1), bias=use_bias),
# norm_layer(ndf*8),
# nn.LeakyReLU(0.2)]
#
# self.cnn_model = nn.Sequential(*seq)
#
# fc = [nn.Linear(4*4*ndf*8, 1)]
# self.fc = nn.Sequential(*fc)
#
# def forward(self, input):
# x = self.cnn_model(input)
# x = x.view(-1, 4*4*self.ndf*8)
# x = self.fc(x)
# return(x)
#