diff --git a/aae/aae.py b/aae/aae.py
index c606d2ba38..9a33e34429 100644
--- a/aae/aae.py
+++ b/aae/aae.py
@@ -1,28 +1,47 @@
-from __future__ import print_function, division
+## USAGE: python 'aae.py' --entity your-wandb-id --project your-project --latentdim 10 --epochs 20000
 
-from keras.datasets import mnist
-from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise
-from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
-from keras.layers import MaxPooling2D, merge
-from keras.layers.advanced_activations import LeakyReLU
-from keras.layers.convolutional import UpSampling2D, Conv2D
-from keras.models import Sequential, Model
-from keras.optimizers import Adam
-from keras import losses
-from keras.utils import to_categorical
-import keras.backend as K
+from __future__ import print_function, division
+import argparse
+import numpy as np
 import matplotlib.pyplot as plt
-import numpy as np
+from tensorflow.keras.datasets import mnist
+from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, Lambda
+from tensorflow.keras.layers import Activation
+from tensorflow.keras.layers import MaxPooling2D
+from tensorflow.keras.layers import LeakyReLU
+from tensorflow.keras.layers import UpSampling2D, Conv2D
+from tensorflow.keras.models import Sequential, Model
+from tensorflow.keras.optimizers import Adam
+import tensorflow.keras.backend as K
+
+
+import wandb
+from wandb.keras import WandbCallback
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--entity', type=str,
+                    help="provide wandb entity")
+parser.add_argument('--project', type=str,
+                    help="provide wandb project name")
+parser.add_argument('--latentdim', type=int, default=10,
+                    help="specify the latent dimensions")
+parser.add_argument("--epochs", type=int, default=20000,
+                    help="number of epochs")
+parser.add_argument("--batch", type=int, default=32,
+                    help="batch size to be used")
+parser.add_argument("--gen_interval", type=int, default=10,
+                    help="log generated images after interval")
+args = parser.parse_args()
 
 class AdversarialAutoencoder():
-    def __init__(self):
+    def __init__(self, latent_dim):
         self.img_rows = 28
         self.img_cols = 28
         self.channels = 1
         self.img_shape = (self.img_rows, self.img_cols, self.channels)
-        self.latent_dim = 10
+        self.latent_dim = latent_dim
 
         optimizer = Adam(0.0002, 0.5)
@@ -67,12 +86,15 @@ def build_encoder(self):
         h = LeakyReLU(alpha=0.2)(h)
         mu = Dense(self.latent_dim)(h)
         log_var = Dense(self.latent_dim)(h)
-        latent_repr = merge([mu, log_var],
-                mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
-                output_shape=lambda p: p[0])
+        latent_repr = Lambda(self.latent, output_shape=(self.latent_dim, ))([mu, log_var])
 
         return Model(img, latent_repr)
 
+    def latent(self, p):
+        """Sample based on `mu` and `log_var`"""
+        mu, log_var = p
+        return mu + K.random_normal(K.shape(mu)) * K.exp(log_var / 2)
+
     def build_decoder(self):
 
         model = Sequential()
@@ -147,6 +169,7 @@ def train(self, epochs, batch_size=128, sample_interval=50):
 
             # Plot the progress
             print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f, mse: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[0], g_loss[1]))
+            wandb.log({'epoch': epoch, 'discriminator_loss': d_loss[0], 'accuracy': 100*d_loss[1], 'generator_loss': g_loss[0], 'mse': g_loss[1]})
 
             # If at save interval => save generated image samples
             if epoch % sample_interval == 0:
@@ -168,6 +191,7 @@ def sample_images(self, epoch):
                 axs[i,j].axis('off')
                 cnt += 1
         fig.savefig("images/mnist_%d.png" % epoch)
+        wandb.log({'aae_generated_imgs': plt})
         plt.close()
 
     def save_model(self):
@@ -186,5 +210,14 @@ def save(model, model_name):
 
 if __name__ == '__main__':
-    aae = AdversarialAutoencoder()
-    aae.train(epochs=20000, batch_size=32, sample_interval=200)
+
+    wandb.init(entity=args.entity, project=args.project)
+    config = wandb.config
+    config.epochs = args.epochs
+    config.batch_size = args.batch
+    config.save_interval = args.gen_interval
+
+    config.latent_dim = args.latentdim
+
+    aae = AdversarialAutoencoder(config.latent_dim)
+    aae.train(epochs=config.epochs, batch_size=config.batch_size, sample_interval=config.save_interval)
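The encoder hunk above replaces the removed `merge(..., mode=...)` API with a named `latent` method wrapped in a `Lambda` layer. A minimal standalone sketch of the same reparameterization trick, assuming only tensorflow.keras; the 512-unit feature width is illustrative, not taken from aae.py:

    import tensorflow.keras.backend as K
    from tensorflow.keras.layers import Dense, Input, Lambda
    from tensorflow.keras.models import Model

    latent_dim = 10                    # matches the script's --latentdim default

    def sample(p):
        # z = mu + eps * sigma with eps ~ N(0, I) and sigma = exp(log_var / 2)
        mu, log_var = p
        return mu + K.random_normal(K.shape(mu)) * K.exp(log_var / 2)

    h = Input(shape=(512,))            # illustrative feature width
    mu = Dense(latent_dim)(h)
    log_var = Dense(latent_dim)(h)
    z = Lambda(sample, output_shape=(latent_dim,))([mu, log_var])
    encoder = Model(h, z)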
fig.savefig("images/%d.png" % epoch) + wandb.log({'acgan_generated_imgs': plt}) plt.close() def save_model(self): @@ -207,5 +234,17 @@ def save(model, model_name): if __name__ == '__main__': - acgan = ACGAN() - acgan.train(epochs=14000, batch_size=32, sample_interval=200) + + wandb.init(entity=args.entity, project=args.project) + config = wandb.config + + config.epochs = args.epochs + config.batch_size = args.batch + config.save_interval = args.gen_interval + + config.latent_dim = args.latentdim + + + + acgan = ACGAN(config.latent_dim) + acgan.train(epochs=config.epochs, batch_size=config.batch_size, sample_interval=config.save_interval) diff --git a/bgan/bgan.py b/bgan/bgan.py index c891ccc7bb..57a2a14c31 100644 --- a/bgan/bgan.py +++ b/bgan/bgan.py @@ -1,28 +1,49 @@ -from __future__ import print_function, division +## USAGE: python 'bgan.py' --entity your-wandb-id --project your-project --latentdim 100 --epochs 30000 -from keras.datasets import mnist -from keras.layers import Input, Dense, Reshape, Flatten, Dropout -from keras.layers import BatchNormalization, Activation, ZeroPadding2D -from keras.layers.advanced_activations import LeakyReLU -from keras.layers.convolutional import UpSampling2D, Conv2D -from keras.models import Sequential, Model -from keras.optimizers import Adam -import keras.backend as K +from __future__ import print_function, division +import argparse +import numpy as np import matplotlib.pyplot as plt -import sys +from tensorflow.keras.datasets import mnist +from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout +from tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D +from tensorflow.keras.layers import MaxPooling2D +from tensorflow.keras.layers import LeakyReLU +from tensorflow.keras.layers import UpSampling2D, Conv2D +from tensorflow.keras.models import Sequential, Model +from tensorflow.keras.optimizers import Adam +import tensorflow.keras.backend as K + + +import wandb +from wandb.keras import WandbCallback + +parser = argparse.ArgumentParser() +parser.add_argument('--entity', type=str, + help="provide wandb entity") +parser.add_argument('--project', type=str, + help="provide wandb project name") +parser.add_argument('--latentdim', type=int, default=100, + help="specify the latent dimentions") +parser.add_argument("--epochs", type=int, default=30000, + help="number of epochs") +parser.add_argument("--batch", type=int, default=32, + help="batch size to be used") +parser.add_argument("--gen_interval", type=int, default=10, + help="log generated images after interval") +args = parser.parse_args() -import numpy as np class BGAN(): """Reference: https://wiseodd.github.io/techblog/2017/03/07/boundary-seeking-gan/""" - def __init__(self): + def __init__(self, latent_dim): self.img_rows = 28 self.img_cols = 28 self.channels = 1 self.img_shape = (self.img_rows, self.img_cols, self.channels) - self.latent_dim = 100 + self.latent_dim = latent_dim optimizer = Adam(0.0002, 0.5) @@ -139,6 +160,7 @@ def train(self, epochs, batch_size=128, sample_interval=50): # Plot the progress print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss)) + wandb.log({'epoch': epoch, 'discriminator_loss': d_loss[0], 'accuracy': 100*d_loss[1], 'generator_loss': g_loss}) # If at save interval => save generated image samples if epoch % sample_interval == 0: @@ -159,9 +181,20 @@ def sample_images(self, epoch): axs[i,j].axis('off') cnt += 1 fig.savefig("images/mnist_%d.png" % epoch) + 
diff --git a/bgan/bgan.py b/bgan/bgan.py
index c891ccc7bb..57a2a14c31 100644
--- a/bgan/bgan.py
+++ b/bgan/bgan.py
@@ -1,28 +1,49 @@
-from __future__ import print_function, division
+## USAGE: python 'bgan.py' --entity your-wandb-id --project your-project --latentdim 100 --epochs 30000
 
-from keras.datasets import mnist
-from keras.layers import Input, Dense, Reshape, Flatten, Dropout
-from keras.layers import BatchNormalization, Activation, ZeroPadding2D
-from keras.layers.advanced_activations import LeakyReLU
-from keras.layers.convolutional import UpSampling2D, Conv2D
-from keras.models import Sequential, Model
-from keras.optimizers import Adam
-import keras.backend as K
+from __future__ import print_function, division
+import argparse
+import numpy as np
 import matplotlib.pyplot as plt
-import sys
+from tensorflow.keras.datasets import mnist
+from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout
+from tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
+from tensorflow.keras.layers import MaxPooling2D
+from tensorflow.keras.layers import LeakyReLU
+from tensorflow.keras.layers import UpSampling2D, Conv2D
+from tensorflow.keras.models import Sequential, Model
+from tensorflow.keras.optimizers import Adam
+import tensorflow.keras.backend as K
+
+
+import wandb
+from wandb.keras import WandbCallback
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--entity', type=str,
+                    help="provide wandb entity")
+parser.add_argument('--project', type=str,
+                    help="provide wandb project name")
+parser.add_argument('--latentdim', type=int, default=100,
+                    help="specify the latent dimensions")
+parser.add_argument("--epochs", type=int, default=30000,
+                    help="number of epochs")
+parser.add_argument("--batch", type=int, default=32,
+                    help="batch size to be used")
+parser.add_argument("--gen_interval", type=int, default=10,
+                    help="log generated images after interval")
+args = parser.parse_args()
 
-import numpy as np
 
 class BGAN():
     """Reference: https://wiseodd.github.io/techblog/2017/03/07/boundary-seeking-gan/"""
-    def __init__(self):
+    def __init__(self, latent_dim):
         self.img_rows = 28
         self.img_cols = 28
         self.channels = 1
         self.img_shape = (self.img_rows, self.img_cols, self.channels)
-        self.latent_dim = 100
+        self.latent_dim = latent_dim
 
         optimizer = Adam(0.0002, 0.5)
@@ -139,6 +160,7 @@ def train(self, epochs, batch_size=128, sample_interval=50):
 
             # Plot the progress
             print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
+            wandb.log({'epoch': epoch, 'discriminator_loss': d_loss[0], 'accuracy': 100*d_loss[1], 'generator_loss': g_loss})
 
             # If at save interval => save generated image samples
             if epoch % sample_interval == 0:
@@ -159,9 +181,20 @@ def sample_images(self, epoch):
                 axs[i,j].axis('off')
                 cnt += 1
         fig.savefig("images/mnist_%d.png" % epoch)
+        wandb.log({'bgan_generated_imgs': plt})
         plt.close()
 
 
 if __name__ == '__main__':
-    bgan = BGAN()
-    bgan.train(epochs=30000, batch_size=32, sample_interval=200)
+
+    wandb.init(entity=args.entity, project=args.project)
+    config = wandb.config
+
+    config.epochs = args.epochs
+    config.batch_size = args.batch
+    config.save_interval = args.gen_interval
+
+    config.latent_dim = args.latentdim
+
+    bgan = BGAN(config.latent_dim)
+    bgan.train(epochs=config.epochs, batch_size=config.batch_size, sample_interval=config.save_interval)
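BGAN differs from a vanilla GAN only in its generator objective. A hedged sketch of the boundary-seeking loss described in the blog post cited in the class docstring; this is a reconstruction from that reference, not a line copied from bgan.py:

    import tensorflow.keras.backend as K

    def boundary_seeking_loss(y_true, y_pred):
        # Push D(G(z)) toward the decision boundary 0.5, where
        # log(y_pred) - log(1 - y_pred) = 0. y_true is unused, as is
        # conventional for a Keras loss that ignores targets.
        return 0.5 * K.mean((K.log(y_pred) - K.log(1 - y_pred)) ** 2)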
diff --git a/bigan/bigan.py b/bigan/bigan.py
index 61c837e7e4..44ed5a01a7 100644
--- a/bigan/bigan.py
+++ b/bigan/bigan.py
@@ -1,28 +1,48 @@
-from __future__ import print_function, division
+## USAGE: python 'bigan.py' --entity your-wandb-id --project your-project --latentdim 100 --epochs 40000
 
-from keras.datasets import mnist
-from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise
-from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
-from keras.layers import MaxPooling2D, concatenate
-from keras.layers.advanced_activations import LeakyReLU
-from keras.layers.convolutional import UpSampling2D, Conv2D
-from keras.models import Sequential, Model
-from keras.optimizers import Adam
-from keras import losses
-from keras.utils import to_categorical
-import keras.backend as K
+from __future__ import print_function, division
+import argparse
+import numpy as np
 import matplotlib.pyplot as plt
-import numpy as np
+from tensorflow.keras.datasets import mnist
+from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise
+from tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
+from tensorflow.keras.layers import MaxPooling2D, concatenate
+from tensorflow.keras.layers import LeakyReLU
+from tensorflow.keras.layers import UpSampling2D, Conv2D
+from tensorflow.keras.models import Sequential, Model
+from tensorflow.keras.optimizers import Adam
+import tensorflow.keras.backend as K
+
+
+import wandb
+from wandb.keras import WandbCallback
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--entity', type=str,
+                    help="provide wandb entity")
+parser.add_argument('--project', type=str,
+                    help="provide wandb project name")
+parser.add_argument('--latentdim', type=int, default=100,
+                    help="specify the latent dimensions")
+parser.add_argument("--epochs", type=int, default=40000,
+                    help="number of epochs")
+parser.add_argument("--batch", type=int, default=32,
+                    help="batch size to be used")
+parser.add_argument("--gen_interval", type=int, default=10,
+                    help="log generated images after interval")
+args = parser.parse_args()
+
 
 class BIGAN():
-    def __init__(self):
+    def __init__(self, latent_dim):
         self.img_rows = 28
         self.img_cols = 28
         self.channels = 1
         self.img_shape = (self.img_rows, self.img_cols, self.channels)
-        self.latent_dim = 100
+        self.latent_dim = latent_dim
 
         optimizer = Adam(0.0002, 0.5)
@@ -160,6 +180,8 @@ def train(self, epochs, batch_size=128, sample_interval=50):
 
             # Plot the progress
             print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[0]))
+            wandb.log({'epoch': epoch, 'discriminator_loss': d_loss[0], 'accuracy': 100*d_loss[1], 'generator_loss': g_loss[0]})
+
             # If at save interval => save generated image samples
             if epoch % sample_interval == 0:
@@ -180,9 +202,20 @@ def sample_interval(self, epoch):
                 axs[i,j].axis('off')
                 cnt += 1
         fig.savefig("images/mnist_%d.png" % epoch)
+        wandb.log({'bigan_generated_imgs': plt})
         plt.close()
 
 
 if __name__ == '__main__':
-    bigan = BIGAN()
-    bigan.train(epochs=40000, batch_size=32, sample_interval=400)
+
+    wandb.init(entity=args.entity, project=args.project)
+    config = wandb.config
+
+    config.epochs = args.epochs
+    config.batch_size = args.batch
+    config.save_interval = args.gen_interval
+
+    config.latent_dim = args.latentdim
+
+    bigan = BIGAN(config.latent_dim)
+    bigan.train(epochs=config.epochs, batch_size=config.batch_size, sample_interval=config.save_interval)
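Every `sample_images`-style hunk in this patch passes the `plt` module itself to `wandb.log`, which captures the current matplotlib figure. A standalone sketch of that pattern; the project name is hypothetical and the random array stands in for `gen_imgs`:

    import matplotlib.pyplot as plt
    import numpy as np
    import wandb

    wandb.init(project="keras-gans")          # hypothetical project name
    fig, axs = plt.subplots(5, 5)
    for ax in axs.flat:
        ax.imshow(np.random.rand(28, 28), cmap='gray')   # stand-in for gen_imgs
        ax.axis('off')
    wandb.log({'generated_imgs': plt})        # or: {'generated_imgs': wandb.Image(fig)}
    plt.close()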
diff --git a/ccgan/ccgan.py b/ccgan/ccgan.py
index e4d6ddfe08..cbf86d04e9 100644
--- a/ccgan/ccgan.py
+++ b/ccgan/ccgan.py
@@ -1,22 +1,43 @@
-from __future__ import print_function, division
+## USAGE: python 'ccgan.py' --entity your-wandb-id --project your-project --epochs 20000
 
-from keras.datasets import mnist
-from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
-from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise
-from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
-from keras.layers import Concatenate
-from keras.layers.advanced_activations import LeakyReLU
-from keras.layers.convolutional import UpSampling2D, Conv2D
-from keras.models import Sequential, Model
-from keras.optimizers import Adam
-from keras import losses
-from keras.utils import to_categorical
-import keras.backend as K
-import scipy
-import matplotlib.pyplot as plt
+from __future__ import print_function, division
+import argparse
 import numpy as np
+import matplotlib.pyplot as plt
+
+from PIL import Image
+
+from tensorflow.keras.datasets import mnist
+from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise
+from tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
+from tensorflow.keras.layers import MaxPooling2D, Concatenate
+from tensorflow.keras.layers import LeakyReLU
+from tensorflow.keras.layers import UpSampling2D, Conv2D
+from tensorflow.keras.models import Sequential, Model
+from tensorflow.keras.optimizers import Adam
+import tensorflow.keras.backend as K
+from tensorflow.keras.utils import to_categorical
+
+from tensorflow_addons.layers import InstanceNormalization
+
+import wandb
+from wandb.keras import WandbCallback
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--entity', type=str,
+                    help="provide wandb entity")
+parser.add_argument('--project', type=str,
+                    help="provide wandb project name")
+parser.add_argument("--epochs", type=int, default=20000,
+                    help="number of epochs")
+parser.add_argument("--batch", type=int, default=32,
+                    help="batch size to be used")
+parser.add_argument("--gen_interval", type=int, default=10,
+                    help="log generated images after interval")
+args = parser.parse_args()
+
 
 class CCGAN():
     def __init__(self):
@@ -148,7 +169,7 @@ def train(self, epochs, batch_size=128, sample_interval=50):
         (X_train, y_train), (_, _) = mnist.load_data()
 
         # Rescale MNIST to 32x32
-        X_train = np.array([scipy.misc.imresize(x, [self.img_rows, self.img_cols]) for x in X_train])
+        X_train = np.array([np.array(Image.fromarray(x).resize((self.img_rows, self.img_cols), Image.BICUBIC)) for x in X_train])
 
         # Rescale -1 to 1
         X_train = (X_train.astype(np.float32) - 127.5) / 127.5
@@ -193,6 +214,7 @@ def train(self, epochs, batch_size=128, sample_interval=50):
 
             # Plot the progress
             print ("%d [D loss: %f, op_acc: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[4], g_loss))
+            wandb.log({'epoch': epoch, 'discriminator_loss': d_loss[0], 'op_accuracy': 100*d_loss[4], 'generator_loss': g_loss})
 
             # If at save interval => save generated image samples
             if epoch % sample_interval == 0:
@@ -223,6 +245,7 @@ def sample_images(self, epoch, imgs):
             axs[2,i].imshow(gen_imgs[i, :, :, 0], cmap='gray')
             axs[2,i].axis('off')
         fig.savefig("images/%d.png" % epoch)
+        wandb.log({'ccgan_generated_imgs': plt})
         plt.close()
 
     def save_model(self):
@@ -241,5 +264,12 @@ def save(model, model_name):
 
 if __name__ == '__main__':
+    wandb.init(entity=args.entity, project=args.project)
+    config = wandb.config
+
+    config.epochs = args.epochs
+    config.batch_size = args.batch
+    config.save_interval = args.gen_interval
+
     ccgan = CCGAN()
-    ccgan.train(epochs=20000, batch_size=32, sample_interval=200)
+    ccgan.train(epochs=config.epochs, batch_size=config.batch_size, sample_interval=config.save_interval)
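`scipy.misc.imresize` was removed from SciPy (deprecated in 1.0, gone in 1.3), which is why the training hunk above switches to Pillow. The substitution in isolation, on one stand-in digit; MNIST arrays are uint8, which `Image.fromarray` accepts directly:

    import numpy as np
    from PIL import Image

    x = np.zeros((28, 28), dtype=np.uint8)   # stand-in for one MNIST digit
    # resize takes (width, height); BICUBIC matches the interpolation above
    resized = np.array(Image.fromarray(x).resize((32, 32), Image.BICUBIC))
    assert resized.shape == (32, 32)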
diff --git a/cgan/cgan.py b/cgan/cgan.py
index e66acb6e4a..6055912711 100755
--- a/cgan/cgan.py
+++ b/cgan/cgan.py
@@ -1,26 +1,48 @@
 from __future__ import print_function, division
 
-from keras.datasets import mnist
-from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
-from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
-from keras.layers.advanced_activations import LeakyReLU
-from keras.layers.convolutional import UpSampling2D, Conv2D
-from keras.models import Sequential, Model
-from keras.optimizers import Adam
-
+import argparse
+import numpy as np
 import matplotlib.pyplot as plt
-import numpy as np
+from tensorflow.keras.datasets import mnist
+from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
+from tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
+from tensorflow.keras.layers import MaxPooling2D
+from tensorflow.keras.layers import LeakyReLU
+from tensorflow.keras.layers import UpSampling2D, Conv2D
+from tensorflow.keras.models import Sequential, Model
+from tensorflow.keras.optimizers import Adam
+import tensorflow.keras.backend as K
+
+
+import wandb
+from wandb.keras import WandbCallback
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--entity', type=str,
+                    help="provide wandb entity")
+parser.add_argument('--project', type=str,
+                    help="provide wandb project name")
+parser.add_argument('--latentdim', type=int, default=100,
+                    help="specify the latent dimensions")
+parser.add_argument("--epochs", type=int, default=20000,
+                    help="number of epochs")
+parser.add_argument("--batch", type=int, default=32,
+                    help="batch size to be used")
+parser.add_argument("--gen_interval", type=int, default=10,
+                    help="log generated images after interval")
+args = parser.parse_args()
+
 
 class CGAN():
-    def __init__(self):
+    def __init__(self, latent_dim):
         # Input shape
         self.img_rows = 28
         self.img_cols = 28
         self.channels = 1
         self.img_shape = (self.img_rows, self.img_cols, self.channels)
         self.num_classes = 10
-        self.latent_dim = 100
+        self.latent_dim = latent_dim
 
         optimizer = Adam(0.0002, 0.5)
@@ -153,6 +175,7 @@ def train(self, epochs, batch_size=128, sample_interval=50):
 
             # Plot the progress
             print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
+            wandb.log({'epoch': epoch, 'discriminator_loss': d_loss[0], 'accuracy': 100*d_loss[1], 'generator_loss': g_loss})
 
             # If at save interval => save generated image samples
             if epoch % sample_interval == 0:
@@ -177,9 +200,20 @@ def sample_images(self, epoch):
                 axs[i,j].axis('off')
                 cnt += 1
         fig.savefig("images/%d.png" % epoch)
+        wandb.log({'cgan_generated_imgs': plt})
         plt.close()
 
 
 if __name__ == '__main__':
-    cgan = CGAN()
-    cgan.train(epochs=20000, batch_size=32, sample_interval=200)
+
+    wandb.init(entity=args.entity, project=args.project)
+    config = wandb.config
+
+    config.epochs = args.epochs
+    config.batch_size = args.batch
+    config.save_interval = args.gen_interval
+
+    config.latent_dim = args.latentdim
+
+    cgan = CGAN(config.latent_dim)
+    cgan.train(epochs=config.epochs, batch_size=config.batch_size, sample_interval=config.save_interval)
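All eight scripts expose the same CLI. A runnable sketch of that argparse surface with cgan.py's defaults; passing `[]` to `parse_args` is only a convenience for trying it outside a shell:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--entity', type=str, help="provide wandb entity")
    parser.add_argument('--project', type=str, help="provide wandb project name")
    parser.add_argument('--latentdim', type=int, default=100,
                        help="specify the latent dimensions")
    parser.add_argument('--epochs', type=int, default=20000)
    parser.add_argument('--batch', type=int, default=32)
    parser.add_argument('--gen_interval', type=int, default=10)

    args = parser.parse_args([])   # [] -> all defaults, handy for testing
    print(args.latentdim, args.epochs, args.batch)   # 100 20000 32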
diff --git a/cogan/cogan.py b/cogan/cogan.py
index 622092b18d..7d0c9c9671 100644
--- a/cogan/cogan.py
+++ b/cogan/cogan.py
@@ -1,28 +1,48 @@
 from __future__ import print_function, division
-import scipy
-
-from keras.datasets import mnist
-from keras.layers import Input, Dense, Reshape, Flatten, Dropout
-from keras.layers import BatchNormalization, Activation, ZeroPadding2D
-from keras.layers.advanced_activations import LeakyReLU
-from keras.layers.convolutional import UpSampling2D, Conv2D
-from keras.models import Sequential, Model
-from keras.optimizers import Adam
+import argparse
+import numpy as np
 import matplotlib.pyplot as plt
-import sys
+import scipy
+
+from tensorflow.keras.datasets import mnist
+from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
+from tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
+from tensorflow.keras.layers import MaxPooling2D
+from tensorflow.keras.layers import LeakyReLU
+from tensorflow.keras.layers import UpSampling2D, Conv2D
+from tensorflow.keras.models import Sequential, Model
+from tensorflow.keras.optimizers import Adam
+import tensorflow.keras.backend as K
+
+import wandb
+from wandb.keras import WandbCallback
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--entity', type=str,
+                    help="provide wandb entity")
+parser.add_argument('--project', type=str,
+                    help="provide wandb project name")
+parser.add_argument('--latentdim', type=int, default=100,
+                    help="specify the latent dimensions")
+parser.add_argument("--epochs", type=int, default=30000,
+                    help="number of epochs")
+parser.add_argument("--batch", type=int, default=32,
+                    help="batch size to be used")
+parser.add_argument("--gen_interval", type=int, default=10,
+                    help="log generated images after interval")
+args = parser.parse_args()
 
-import numpy as np
 
 class COGAN():
     """Reference: https://wiseodd.github.io/techblog/2017/02/18/coupled_gan/"""
-    def __init__(self):
+    def __init__(self, latent_dim):
         self.img_rows = 28
         self.img_cols = 28
         self.channels = 1
         self.img_shape = (self.img_rows, self.img_cols, self.channels)
-        self.latent_dim = 100
+        self.latent_dim = latent_dim
 
         optimizer = Adam(0.0002, 0.5)
@@ -167,6 +187,14 @@ def train(self, epochs, batch_size=128, sample_interval=50):
             print ("%d [D1 loss: %f, acc.: %.2f%%] [D2 loss: %f, acc.: %.2f%%] [G loss: %f]" \
                 % (epoch, d1_loss[0], 100*d1_loss[1], d2_loss[0], 100*d2_loss[1], g_loss[0]))
+            wandb.log({'epoch': epoch,
+                       'discriminator1_loss': d1_loss[0],
+                       'accuracy1': 100*d1_loss[1],
+                       'discriminator2_loss': d2_loss[0],
+                       'accuracy2': 100*d2_loss[1],
+                       'generator_loss': g_loss[0]})
+
+
             # If at save interval => save generated image samples
             if epoch % sample_interval == 0:
                 self.sample_images(epoch)
@@ -190,9 +218,20 @@ def sample_images(self, epoch):
                 axs[i,j].axis('off')
                 cnt += 1
         fig.savefig("images/mnist_%d.png" % epoch)
+        wandb.log({'cogan_generated_imgs': plt})
         plt.close()
 
 
 if __name__ == '__main__':
-    gan = COGAN()
-    gan.train(epochs=30000, batch_size=32, sample_interval=200)
+
+    wandb.init(entity=args.entity, project=args.project)
+    config = wandb.config
+
+    config.epochs = args.epochs
+    config.batch_size = args.batch
+    config.save_interval = args.gen_interval
+
+    config.latent_dim = args.latentdim
+
+    gan = COGAN(config.latent_dim)
+    gan.train(epochs=config.epochs, batch_size=config.batch_size, sample_interval=config.save_interval)
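COGAN trains two coupled discriminators, and the hunk above logs all of their metrics in a single `wandb.log` call so they land on the same step axis. A sketch with dummy values standing in for the real `train_on_batch` results; the project name is hypothetical:

    import wandb

    wandb.init(project="keras-gans")        # hypothetical project name
    for epoch in range(3):
        # Dummy stand-ins for the [loss, accuracy] lists train_on_batch returns
        d1_loss, d2_loss, g_loss = [0.70, 0.50], [0.65, 0.55], [1.20]
        wandb.log({'epoch': epoch,
                   'discriminator1_loss': d1_loss[0],
                   'accuracy1': 100*d1_loss[1],
                   'discriminator2_loss': d2_loss[0],
                   'accuracy2': 100*d2_loss[1],
                   'generator_loss': g_loss[0]})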
diff --git a/dcgan/dcgan.py b/dcgan/dcgan.py
index 38c369e49c..f6d6f66e69 100644
--- a/dcgan/dcgan.py
+++ b/dcgan/dcgan.py
@@ -1,27 +1,46 @@
-from __future__ import print_function, division
-
-from keras.datasets import mnist
-from keras.layers import Input, Dense, Reshape, Flatten, Dropout
-from keras.layers import BatchNormalization, Activation, ZeroPadding2D
-from keras.layers.advanced_activations import LeakyReLU
-from keras.layers.convolutional import UpSampling2D, Conv2D
-from keras.models import Sequential, Model
-from keras.optimizers import Adam
+## USAGE: python 'dcgan.py' --entity your-wandb-id --project your-project --latentdim 10 --epochs 4000
 
-import matplotlib.pyplot as plt
+from __future__ import print_function, division
 import sys
-
 import numpy as np
+import matplotlib.pyplot as plt
+import argparse
+
+from tensorflow.keras.datasets import mnist
+from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout
+from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D
+from tensorflow.keras.layers import LeakyReLU
+from tensorflow.keras.layers import UpSampling2D, Conv2D
+from tensorflow.keras.models import Sequential, Model
+from tensorflow.keras.optimizers import Adam
+
+import wandb
+from wandb.keras import WandbCallback
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--entity', type=str,
+                    help="provide wandb entity")
+parser.add_argument('--project', type=str,
+                    help="provide wandb project name")
+parser.add_argument('--latentdim', type=int, default=10,
+                    help="specify the latent dimensions")
+parser.add_argument("--epochs", type=int, default=4000,
+                    help="number of epochs")
+parser.add_argument("--batch", type=int, default=32,
+                    help="batch size to be used")
+parser.add_argument("--gen_interval", type=int, default=50,
+                    help="log generated images after interval")
+args = parser.parse_args()
 
 class DCGAN():
-    def __init__(self):
+    def __init__(self, latent_dim):
         # Input shape
         self.img_rows = 28
         self.img_cols = 28
         self.channels = 1
         self.img_shape = (self.img_rows, self.img_cols, self.channels)
-        self.latent_dim = 100
+        self.latent_dim = latent_dim
 
         optimizer = Adam(0.0002, 0.5)
@@ -144,6 +163,7 @@ def train(self, epochs, batch_size=128, save_interval=50):
 
             # Plot the progress
             print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
+            wandb.log({'epoch': epoch, 'discriminator_loss': d_loss[0], 'accuracy': 100*d_loss[1], 'generator_loss': g_loss})
 
             # If at save interval => save generated image samples
             if epoch % save_interval == 0:
@@ -164,10 +184,20 @@ def save_imgs(self, epoch):
                 axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')
                 axs[i,j].axis('off')
                 cnt += 1
-        fig.savefig("images/mnist_%d.png" % epoch)
+        # fig.savefig("images/mnist_%d.png" % epoch)
+        wandb.log({'gan_generated_imgs': plt})
         plt.close()
 
 
 if __name__ == '__main__':
-    dcgan = DCGAN()
-    dcgan.train(epochs=4000, batch_size=32, save_interval=50)
+
+    wandb.init(entity=args.entity, project=args.project)
+    config = wandb.config
+    config.epochs = args.epochs
+    config.batch_size = args.batch
+    config.save_interval = args.gen_interval
+
+    config.latent_dim = args.latentdim
+
+    dcgan = DCGAN(latent_dim=config.latent_dim)
+    dcgan.train(epochs=config.epochs, batch_size=config.batch_size, save_interval=config.save_interval)
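Each `__main__` block in this patch follows the same bootstrap: call `wandb.init`, mirror the parsed CLI arguments into `wandb.config` so the run page records the hyperparameters, then hand the config values to the model. A condensed standalone sketch, with entity and project as placeholders; note that the imported `WandbCallback` only fires inside `model.fit`, so these custom training loops log manually instead:

    import argparse
    import wandb

    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=4000)
    parser.add_argument('--batch', type=int, default=32)
    args = parser.parse_args([])            # defaults only, for illustration

    wandb.init(entity="your-wandb-id", project="your-project")  # placeholders
    config = wandb.config
    config.epochs = args.epochs             # recorded with the run,
    config.batch_size = args.batch          # and usable by wandb sweeps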