
Commit

remove dead code
NizarIslah committed Apr 21, 2023
1 parent 11c1f72 commit f169774
Showing 1 changed file with 0 additions and 87 deletions.
87 changes: 0 additions & 87 deletions ambiguous/models/cvae.py
@@ -357,90 +357,3 @@ def loss_function(self,rec,x,mu,logvar):
        rec_loss = F.mse_loss(rec, x, reduction='sum')
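        # kld_loss is computed earlier in loss_function, above this hunk;
        # for a standard VAE it is -0.5 * sum(1 + logvar - mu**2 - exp(logvar)).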
        loss = rec_loss + kld_loss
        return {'loss': loss, 'rec': rec_loss.detach(), 'kld': kld_loss.detach()}

####################################
class EMNIST_Encoder(nn.Module):
    def __init__(self, latent_dim, layer_sizes, n_classes, conditional=False):
        super(EMNIST_Encoder, self).__init__()
        self.MLP = nn.Sequential()

        self.conditional = conditional
        if self.conditional:
            layer_sizes[0] += n_classes

        for i, (in_size, out_size) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
            self.MLP.add_module(
                name="L{:d}".format(i), module=nn.Linear(in_size, out_size))
            self.MLP.add_module(name="A{:d}".format(i), module=nn.ReLU())
        self.fc_mu = nn.Linear(layer_sizes[-1], latent_dim)
        self.fc_logvar = nn.Linear(layer_sizes[-1], latent_dim)

        self.device = device
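        # `device` is not a constructor argument; it is read from a module-level
        # name that cvae.py is assumed to define elsewhere.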

    def forward(self, x, c=None):
        if self.conditional:
            x = torch.cat([x, c], 1)
        h = self.MLP(x)
        mu, logvar = self.fc_mu(h), self.fc_logvar(h)
        return mu, logvar

    def sample(self, mu, logvar):
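        # Reparameterization trick: z = mu + std * eps, with eps ~ N(0, I).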
        std = logvar.mul(0.5).exp_()
        if self.device == 'cuda':
            eps = torch.cuda.FloatTensor(std.size()).normal_()
        else:
            eps = torch.FloatTensor(std.size()).normal_()
        return eps.mul(std).add_(mu)

class EMNIST_Decoder(nn.Module):
    def __init__(self, latent_dim, layer_sizes, n_classes, conditional=False):
        super(EMNIST_Decoder, self).__init__()

        self.MLP = nn.Sequential()
        self.conditional = conditional
        if self.conditional:
            input_size = latent_dim + n_classes
        else:
            input_size = latent_dim

        for i, (in_size, out_size) in enumerate(zip([input_size]+layer_sizes[:-1], layer_sizes)):
            self.MLP.add_module(
                name="L{:d}".format(i), module=nn.Linear(in_size, out_size))
            if i+1 < len(layer_sizes):
                self.MLP.add_module(name="A{:d}".format(i), module=nn.ReLU())
            else:
                self.MLP.add_module(name="sigmoid", module=nn.Sigmoid())

    def forward(self, z, c):
        if self.conditional:
            z = torch.cat([z, c], 1)
        rec = self.MLP(z)
        return rec


class EMNIST_EncoderV1(nn.Module):
    def __init__(self, latent_dim, layer_sizes):
        super(EMNIST_EncoderV1, self).__init__()
        self.MLP = nn.Sequential()
        for i, (in_size, out_size) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
            self.MLP.add_module(
                name="L{:d}".format(i), module=nn.Linear(in_size, out_size))
            self.MLP.add_module(name="A{:d}".format(i), module=nn.ReLU())
        self.fc_mu = nn.Linear(layer_sizes[-1], latent_dim)
        self.fc_logvar = nn.Linear(layer_sizes[-1], latent_dim)

        self.device = device

    def forward(self, x):
        h = self.MLP(x)
        mu, logvar = self.fc_mu(h), self.fc_logvar(h)
        return mu, logvar

    def sample(self, mu, logvar):
        std = logvar.mul(0.5).exp_()
        if self.device == 'cuda':
            eps = torch.cuda.FloatTensor(std.size()).normal_()
        else:
            eps = torch.FloatTensor(std.size()).normal_()
        return eps.mul(std).add_(mu)
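
For reference, a minimal sketch of how the removed conditional encoder/decoder pair would presumably have been wired into a single CVAE step. The layer sizes, batch size, and class count are illustrative assumptions, the import only resolves on revisions before this commit, and the classes expect cvae.py to define a module-level `device` name.

import torch
import torch.nn.functional as F
from ambiguous.models.cvae import EMNIST_Encoder, EMNIST_Decoder

n_classes, latent_dim = 26, 20
encoder = EMNIST_Encoder(latent_dim, [784, 256], n_classes, conditional=True)
decoder = EMNIST_Decoder(latent_dim, [256, 784], n_classes, conditional=True)

x = torch.rand(8, 784)  # flattened 28x28 EMNIST images
c = F.one_hot(torch.randint(0, n_classes, (8,)), n_classes).float()

mu, logvar = encoder(x, c)      # q(z|x,c): concatenates [x, c] internally
z = encoder.sample(mu, logvar)  # reparameterization: z = mu + std * eps
rec = decoder(z, c)             # p(x|z,c): concatenates [z, c] internally

rec_loss = F.mse_loss(rec, x, reduction='sum')
kld_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
loss = rec_loss + kld_loss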

