mvae_conv_model.py
from __future__ import print_function
import torch
import torch.nn as nn
ngf = 64  # decoder (generator) feature-map width
ndf = 64  # encoder feature-map width
nc = 1    # number of image channels (1 for MNIST-style 28 x 28 inputs)
class VAE(nn.Module):
    def __init__(self, nz):
        super(VAE, self).__init__()
        self.nz = nz
        self.encoder = nn.Sequential(
            # input is (nc) x 28 x 28
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 14 x 14
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 7 x 7
            nn.Conv2d(ndf * 2, ndf * 4, 3, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 4 x 4
            nn.Conv2d(ndf * 4, 1024, 4, 1, 0, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. 1024 x 1 x 1
        )
        self.decoder = nn.Sequential(
            # input is the reshaped latent projection, 1024 x 1 x 1
            nn.ConvTranspose2d(1024, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 3, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 7 x 7
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 14 x 14
            nn.ConvTranspose2d(ngf * 2, nc, 4, 2, 1, bias=False),
            nn.Sigmoid(),
            # state size. (nc) x 28 x 28, matching the encoder input
        )
        # project the 1024-d conv features down to the latent Gaussian parameters ...
        self.fc1 = nn.Linear(1024, 512)
        self.fc21 = nn.Linear(512, nz)  # mu head
        self.fc22 = nn.Linear(512, nz)  # logvar head
        # ... and project a latent sample back up to the decoder input
        self.fc3 = nn.Linear(nz, 512)
        self.fc4 = nn.Linear(512, 1024)
        self.lrelu = nn.LeakyReLU()
        self.relu = nn.ReLU()
    def encode(self, x):
        """Map an image batch to the latent Gaussian parameters (mu, logvar)."""
        conv = self.encoder(x)
        h1 = self.fc1(conv.view(-1, 1024))
        return self.fc21(h1), self.fc22(h1)

    def decode(self, z):
        """Map a batch of latent codes back to image space."""
        h3 = self.relu(self.fc3(z))
        deconv_input = self.fc4(h3).view(-1, 1024, 1, 1)
        return self.decoder(deconv_input)
    def reparametrize(self, mu, logvar):
        """Sample z = mu + std * eps, eps ~ N(0, I) (the reparameterization trick)."""
        std = logvar.mul(0.5).exp_()
        # randn_like draws eps on the same device/dtype as std, so this works
        # on CPU and GPU without a manually maintained have_cuda flag
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)
    def forward(self, x):
        mu, logvar = self.encode(x)
        z = self.reparametrize(mu, logvar)
        return self.decode(z), mu, logvar
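

# ---------------------------------------------------------------------------
# Usage sketch (an illustrative addition, not part of the original module):
# smoke-test the model with the standard VAE objective, i.e. binary
# cross-entropy reconstruction plus the closed-form KL divergence to N(0, I).
# The latent size nz=20, the batch size, and the vae_loss helper below are
# assumed choices, not this repository's training code.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch.nn.functional as F

    def vae_loss(recon_x, x, mu, logvar):
        # Pixel-wise binary cross-entropy, summed over batch and pixels
        bce = F.binary_cross_entropy(recon_x, x, reduction='sum')
        # KL(q(z|x) || N(0, I)) = -0.5 * sum(1 + logvar - mu^2 - exp(logvar))
        kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        return bce + kld

    model = VAE(nz=20)
    x = torch.rand(16, nc, 28, 28)  # dummy batch of (nc) x 28 x 28 images in [0, 1]
    recon, mu, logvar = model(x)
    assert recon.shape == x.shape   # decoder output mirrors the encoder input size
    loss = vae_loss(recon, x, mu, logvar)
    loss.backward()
    print("loss:", loss.item())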