GANs use two networks competing against each other to generate data.

1) Generator: Receives random noise (sampled from a Gaussian distribution).
Outputs data (often an image).

2) Discriminator:
Takes a dataset consisting of real images from the training set and fake images from the generator.
Attempts to classify real vs. fake images (always a binary classification task).

import numpy as np
import pandas as pd 
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
# Load MNIST: 28x28 grayscale digit images with integer labels 0-9.
(X_train,y_train), (X_test, y_test) = mnist.load_data()
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 2s 0us/step
plt.imshow(X_train[0])  # preview the first training image
<matplotlib.image.AxesImage at 0x204969adcf8>
y_train  # inspect the label array (uint8, values 0-9)
array([5, 0, 4, ..., 5, 6, 8], dtype=uint8)
# Keep only the images labelled 0 — the GAN is trained on zeros alone.
only_zeros = X_train[y_train==0]
print(X_train.shape, only_zeros.shape)
(60000, 28, 28) (5923, 28, 28)
plt.imshow(only_zeros[19])  # spot-check one of the selected zero images
<matplotlib.image.AxesImage at 0x20496faab70>
import tensorflow as tf
from tensorflow.keras.layers import Dense,Reshape,Flatten
from tensorflow.keras.models import Sequential
# Discriminator: flattens a 28x28 image and scores it with a single
# sigmoid unit (probability that the input is real).
discriminator = Sequential()
discriminator.add(Flatten(input_shape=[28,28]))
discriminator.add(Dense(150,activation='relu'))
discriminator.add(Dense(100,activation='relu'))
# Final output layer
discriminator.add(Dense(1,activation='sigmoid'))

discriminator.compile(loss='binary_crossentropy',optimizer='adam')
discriminator.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
flatten (Flatten)            (None, 784)               0         
_________________________________________________________________
dense (Dense)                (None, 150)               117750    
_________________________________________________________________
dense_1 (Dense)              (None, 100)               15100     
_________________________________________________________________
dense_2 (Dense)              (None, 1)                 101       
=================================================================
Total params: 132,951
Trainable params: 132,951
Non-trainable params: 0
_________________________________________________________________
# Size of the random noise vector fed to the generator.
codings_size = 100

# Generator: maps a noise vector to a 28x28 "image".
generator = Sequential()
generator.add(Dense(100,activation='relu',input_shape=[codings_size]))
generator.add(Dense(150,activation='relu'))
# NOTE(review): a relu output layer clips negatives to zero; the DCGAN
# version below switches to tanh with data rescaled to [-1, 1].
generator.add(Dense(784,activation='relu'))

# Discriminator expects shape of 28x28
generator.add(Reshape([28,28]))

# We do not compile the generator
GAN = Sequential([generator,discriminator])
generator.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_3 (Dense)              (None, 100)               10100     
_________________________________________________________________
dense_4 (Dense)              (None, 150)               15150     
_________________________________________________________________
dense_5 (Dense)              (None, 784)               118384    
_________________________________________________________________
reshape (Reshape)            (None, 28, 28)            0         
=================================================================
Total params: 143,634
Trainable params: 143,634
Non-trainable params: 0
_________________________________________________________________
GAN.summary()  # combined model: generator followed by discriminator
Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
sequential_1 (Sequential)    (None, 28, 28)            143634    
_________________________________________________________________
sequential (Sequential)      (None, 1)                 132951    
=================================================================
Total params: 276,585
Trainable params: 143,634
Non-trainable params: 132,951
_________________________________________________________________
GAN.layers  # two sub-models: [generator, discriminator]
[<tensorflow.python.keras.engine.sequential.Sequential at 0x204990c9f98>,
 <tensorflow.python.keras.engine.sequential.Sequential at 0x20483bcf6a0>]
GAN.layers[1].summary()  # the discriminator, accessed through the combined model
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
flatten (Flatten)            (None, 784)               0         
_________________________________________________________________
dense (Dense)                (None, 150)               117750    
_________________________________________________________________
dense_1 (Dense)              (None, 100)               15100     
_________________________________________________________________
dense_2 (Dense)              (None, 1)                 101       
=================================================================
WARNING:tensorflow:Discrepancy between trainable weights and collected trainable weights, did you set `model.trainable` without calling `model.compile` after ?
Total params: 265,902
Trainable params: 132,951
Non-trainable params: 132,951
_________________________________________________________________
# Freeze the discriminator inside the combined model: during the
# generator-training phase only the generator's weights should update.
discriminator.trainable = False # Shouldn't be trained in the second phase
GAN.compile(loss='binary_crossentropy',optimizer='adam')
batch_size = 32

my_data = only_zeros
# Shuffled tf.data pipeline over the real "zero" images.
dataset = tf.data.Dataset.from_tensor_slices(my_data).shuffle(buffer_size=1000)
type(dataset)
tensorflow.python.data.ops.dataset_ops.ShuffleDataset
# Fixed-size batches (drop_remainder) so per-batch label tensors line up.
dataset = dataset.batch(batch_size,drop_remainder=True).prefetch(1)
# NOTE(review): this variable is never used — the training loop below
# hardcodes range(10). Confirm the intended epoch count.
epochs = 1
# One batch of Gaussian noise vectors, shape (batch_size, codings_size).
noise = tf.random.normal(shape=[batch_size,codings_size])
noise
<tf.Tensor: shape=(32, 100), dtype=float32, numpy=
array([[-0.17894243,  0.28046852, -1.2658674 , ..., -1.8558825 ,
         0.49134666, -0.02820994],
       [-0.57840776,  1.028697  ,  1.0578247 , ...,  1.2367849 ,
        -1.26806   , -0.07681153],
       [ 1.2107298 ,  1.1793936 , -0.17294073, ...,  1.1151178 ,
         0.6852811 ,  1.0923375 ],
       ...,
       [-0.23432946, -1.8061259 ,  0.12568073, ...,  0.70886606,
        -0.14349754, -0.61847156],
       [-1.3304747 , -1.5051615 , -0.9862153 , ...,  1.2475176 ,
        -1.0224384 ,  0.431593  ],
       [-0.12189417,  0.76724905,  0.23410986, ..., -1.2387109 ,
         1.165128  , -2.2503793 ]], dtype=float32)>
generator(noise)  # untrained generator output for the noise batch
<tf.Tensor: shape=(32, 28, 28), dtype=float32, numpy=
array([[[1.39178470e-01, 0.00000000e+00, 1.32437050e-01, ...,
         0.00000000e+00, 1.24021992e-01, 6.28608763e-02],
        [6.59890613e-03, 2.57760793e-01, 0.00000000e+00, ...,
         1.15738064e-01, 2.14011580e-01, 5.65851629e-02],
        [0.00000000e+00, 0.00000000e+00, 1.05919935e-01, ...,
         0.00000000e+00, 0.00000000e+00, 1.68934196e-01],
        ...,
        [0.00000000e+00, 0.00000000e+00, 1.86894268e-01, ...,
         0.00000000e+00, 0.00000000e+00, 2.71052748e-01],
        [2.84300804e-01, 0.00000000e+00, 0.00000000e+00, ...,
         2.44469017e-01, 1.92485914e-01, 9.82135981e-02],
        [1.36533707e-01, 1.01417536e-02, 2.23434702e-01, ...,
         2.80212998e-01, 0.00000000e+00, 1.18388772e-01]],

       [[1.01866886e-01, 8.40153452e-03, 3.36099118e-01, ...,
         0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
        [1.97205901e-01, 3.08540463e-01, 0.00000000e+00, ...,
         0.00000000e+00, 6.23972267e-02, 0.00000000e+00],
        [0.00000000e+00, 9.11480337e-02, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 1.46415949e-01],
        ...,
        [0.00000000e+00, 0.00000000e+00, 3.23097338e-03, ...,
         0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
        [2.26200327e-01, 0.00000000e+00, 0.00000000e+00, ...,
         0.00000000e+00, 1.98774487e-01, 0.00000000e+00],
        [8.80293269e-03, 1.53370097e-01, 0.00000000e+00, ...,
         2.82919168e-01, 0.00000000e+00, 0.00000000e+00]],

       [[1.17725708e-01, 2.23612323e-01, 1.17519675e-02, ...,
         0.00000000e+00, 3.49459708e-01, 7.10227117e-02],
        [9.54425707e-02, 2.67689358e-02, 0.00000000e+00, ...,
         0.00000000e+00, 1.77097246e-01, 0.00000000e+00],
        [1.30967632e-01, 1.29050925e-01, 6.73688576e-02, ...,
         1.02557778e-01, 0.00000000e+00, 1.15729682e-01],
        ...,
        [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 2.39297479e-01],
        [3.21245670e-01, 0.00000000e+00, 1.83343470e-01, ...,
         3.25962126e-01, 1.40042186e-01, 0.00000000e+00],
        [1.80492461e-01, 3.51804972e-01, 2.36293271e-01, ...,
         0.00000000e+00, 0.00000000e+00, 0.00000000e+00]],

       ...,

       [[1.42159536e-01, 0.00000000e+00, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
        [0.00000000e+00, 4.44585770e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
        [2.04186380e-01, 0.00000000e+00, 6.70287386e-02, ...,
         0.00000000e+00, 0.00000000e+00, 3.81664932e-01],
        ...,
        [0.00000000e+00, 1.49426967e-01, 1.63367286e-01, ...,
         2.98328828e-02, 0.00000000e+00, 0.00000000e+00],
        [1.97943553e-01, 0.00000000e+00, 2.27516845e-01, ...,
         4.64169234e-02, 1.18896528e-03, 0.00000000e+00],
        [2.69571096e-01, 2.77383655e-01, 4.21722472e-01, ...,
         1.52053118e-01, 6.74408153e-02, 0.00000000e+00]],

       [[6.14769608e-02, 2.81049967e-01, 0.00000000e+00, ...,
         0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
        [2.63842016e-01, 0.00000000e+00, 0.00000000e+00, ...,
         0.00000000e+00, 2.18015909e-02, 0.00000000e+00],
        [7.67226219e-02, 3.32821041e-01, 0.00000000e+00, ...,
         5.93183376e-02, 0.00000000e+00, 4.10942137e-01],
        ...,
        [0.00000000e+00, 9.55804810e-03, 4.89544719e-02, ...,
         0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
        [2.31722251e-01, 0.00000000e+00, 1.59873813e-01, ...,
         0.00000000e+00, 7.27455840e-02, 9.92300510e-02],
        [2.08125293e-01, 0.00000000e+00, 6.86223879e-02, ...,
         0.00000000e+00, 1.10909343e-04, 0.00000000e+00]],

       [[4.18031871e-01, 5.28196394e-02, 4.48989421e-02, ...,
         0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
        [2.00054914e-01, 1.48861587e-01, 0.00000000e+00, ...,
         0.00000000e+00, 2.44434997e-02, 0.00000000e+00],
        [0.00000000e+00, 6.79483935e-02, 1.54806316e-01, ...,
         0.00000000e+00, 0.00000000e+00, 1.94236770e-01],
        ...,
        [0.00000000e+00, 0.00000000e+00, 5.45343012e-02, ...,
         0.00000000e+00, 0.00000000e+00, 1.19087696e-02],
        [9.12818313e-02, 0.00000000e+00, 4.18430388e-01, ...,
         3.52661490e-01, 3.92701089e-01, 0.00000000e+00],
        [1.65195346e-01, 1.91727772e-01, 1.21707544e-01, ...,
         7.65198320e-02, 5.71975708e-02, 0.00000000e+00]]], dtype=float32)>
generator, discriminator = GAN.layers

# Adversarial training: alternate one discriminator update and one
# generator update per real batch.
# NOTE(review): this loop hardcodes 10 epochs and ignores the `epochs`
# variable defined earlier — confirm which was intended.
for epoch in range(10): # 10 is number of epochs
    print(f"Currently on Epoch {epoch+1}")

    for i, X_batch in enumerate(dataset, start=1):

        if i % 100 == 0:
            print(f"\t Currently on batch number {i} of {len(my_data)//batch_size}")

        # --- Phase 1: train the discriminator on a mixed batch ---
        # The generator only ever sees random noise as input.
        noise = tf.random.normal(shape=[batch_size, codings_size])
        gen_images = generator(noise)

        # Fakes first, then reals (cast so dtypes match for the concat).
        X_fake_vs_real = tf.concat(
            [gen_images, tf.dtypes.cast(X_batch, tf.float32)], axis=0)

        # Labels: 0.0 = fake, 1.0 = real.
        y1 = tf.constant([[0.0]]*batch_size + [[1.0]]*batch_size)

        discriminator.trainable = True
        discriminator.train_on_batch(X_fake_vs_real, y1)

        # --- Phase 2: train the generator through the frozen discriminator ---
        noise = tf.random.normal(shape=[batch_size, codings_size])
        # All-ones targets: the generator wants fakes classified as real.
        y2 = tf.constant([[1.0]]*batch_size)

        discriminator.trainable = False
        GAN.train_on_batch(noise, y2)

print("Training complete!")        
Currently on Epoch 1
	 Currently on batch number 100 of 185
Currently on Epoch 2
	 Currently on batch number 100 of 185
Currently on Epoch 3
	 Currently on batch number 100 of 185
Currently on Epoch 4
	 Currently on batch number 100 of 185
Currently on Epoch 5
	 Currently on batch number 100 of 185
Currently on Epoch 6
	 Currently on batch number 100 of 185
Currently on Epoch 7
	 Currently on batch number 100 of 185
Currently on Epoch 8
	 Currently on batch number 100 of 185
Currently on Epoch 9
	 Currently on batch number 100 of 185
Currently on Epoch 10
	 Currently on batch number 100 of 185
Training complete!
# Fresh noise for generating 10 sample images.
noise = tf.random.normal(shape=[10, codings_size])
noise.shape
TensorShape([10, 100])
plt.imshow(noise)  # visualize the raw noise matrix itself
<matplotlib.image.AxesImage at 0x2049b9b6080>
image = generator(noise)  # 10 generated samples from the trained generator
image.shape
TensorShape([10, 28, 28])
plt.imshow(image[5])  # inspect one generated sample
<matplotlib.image.AxesImage at 0x2049ba875c0>
plt.imshow(image[1])  # nearly identical to the other samples — the model has undergone 'mode collapse'.
<matplotlib.image.AxesImage at 0x2049bae7a58>
# Rescale pixels to [-1, 1] and add a channel axis for the Conv2D layers.
X_train = X_train/255
X_train = X_train.reshape(-1, 28, 28, 1) * 2. - 1.  # Because we will be using 'tanh' later
X_train.min()
-1.0
X_train.max()  # confirm the data now spans [-1, 1]
1.0
# Re-select the zeros from the rescaled data (now with a channel axis).
only_zeros = X_train[y_train==0]
only_zeros.shape
(5923, 28, 28, 1)
import tensorflow as tf
from tensorflow.keras.layers import Dense,Reshape,Dropout,LeakyReLU,Flatten,BatchNormalization,Conv2D,Conv2DTranspose
from tensorflow.keras.models import Sequential
# Seed both NumPy and TensorFlow for reproducible initialization/noise.
np.random.seed(42)
tf.random.set_seed(42)

codings_size = 100
# DCGAN generator: project noise to 7x7x128, then upsample
# 7x7 -> 14x14 -> 28x28 with strided transposed convolutions.
generator = Sequential()
generator.add(Dense(7 * 7 * 128, input_shape=[codings_size]))
generator.add(Reshape([7, 7, 128]))
generator.add(BatchNormalization())
generator.add(Conv2DTranspose(64, kernel_size=5, strides=2, padding="same",
                                 activation="relu"))
generator.add(BatchNormalization())
# tanh output matches the training data rescaled to [-1, 1].
generator.add(Conv2DTranspose(1, kernel_size=5, strides=2, padding="same",
                                 activation="tanh"))
# DCGAN discriminator: strided convolutions with LeakyReLU and Dropout,
# ending in a single sigmoid real/fake score.
discriminator = Sequential()
discriminator.add(Conv2D(64, kernel_size=5, strides=2, padding="same",
                        activation=LeakyReLU(0.3),
                        input_shape=[28, 28, 1]))
discriminator.add(Dropout(0.5))
discriminator.add(Conv2D(128, kernel_size=5, strides=2, padding="same",
                        activation=LeakyReLU(0.3)))
discriminator.add(Dropout(0.5))
discriminator.add(Flatten())
discriminator.add(Dense(1, activation="sigmoid"))
# Order matters: compile the discriminator while trainable, then freeze
# it inside the combined GAN so GAN.train_on_batch updates only the generator.
GAN = Sequential([generator, discriminator])
discriminator.compile(loss="binary_crossentropy", optimizer="adam")
discriminator.trainable = False
GAN.compile(loss="binary_crossentropy", optimizer="adam")
GAN.layers  # two sub-models: [generator, discriminator]
[<tensorflow.python.keras.engine.sequential.Sequential at 0x2049bacf240>,
 <tensorflow.python.keras.engine.sequential.Sequential at 0x2049bb33fd0>]
GAN.summary()  # combined DCGAN: generator followed by discriminator
Model: "sequential_5"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
sequential_3 (Sequential)    (None, 28, 28, 1)         840705    
_________________________________________________________________
sequential_4 (Sequential)    (None, 1)                 212865    
=================================================================
Total params: 1,053,570
Trainable params: 840,321
Non-trainable params: 213,249
_________________________________________________________________
GAN.layers[0].summary()  # the DCGAN generator's layer breakdown
Model: "sequential_3"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_6 (Dense)              (None, 6272)              633472    
_________________________________________________________________
reshape_1 (Reshape)          (None, 7, 7, 128)         0         
_________________________________________________________________
batch_normalization (BatchNo (None, 7, 7, 128)         512       
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 14, 14, 64)        204864    
_________________________________________________________________
batch_normalization_1 (Batch (None, 14, 14, 64)        256       
_________________________________________________________________
conv2d_transpose_1 (Conv2DTr (None, 28, 28, 1)         1601      
=================================================================
Total params: 840,705
Trainable params: 840,321
Non-trainable params: 384
_________________________________________________________________
batch_size = 32
my_data = only_zeros
# Shuffled pipeline over the rescaled (28, 28, 1) zero images.
dataset = tf.data.Dataset.from_tensor_slices(my_data).shuffle(buffer_size=1000)
type(dataset)
tensorflow.python.data.ops.dataset_ops.ShuffleDataset
# Fixed-size prefetched batches; epochs is used by the loop below.
dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1)
epochs = 20
generator, discriminator = GAN.layers

# For every epoch
for epoch in range(epochs):
    print(f"Currently on Epoch {epoch+1}")
    i = 0
    # For every batch in the dataset
    for X_batch in dataset:
        i=i+1
        if i%20 == 0:
            print(f"\tCurrently on batch number {i} of {len(my_data)//batch_size}")
        #####################################
        ## TRAINING THE DISCRIMINATOR ######
        ###################################
        
        # Create noise (the generator only ever sees random noise)
        noise = tf.random.normal(shape=[batch_size, codings_size])
        
        # Generate images based only on the noise input
        gen_images = generator(noise)
        
        # Concatenate generated images with the real ones.
        # To use tf.concat, the data types must match!
        X_fake_vs_real = tf.concat([gen_images, tf.dtypes.cast(X_batch,tf.float32)], axis=0)
        
        # Targets set to 0 for fake images and 1 for real images
        y1 = tf.constant([[0.]] * batch_size + [[1.]] * batch_size)
        
        # This gets rid of a Keras warning
        discriminator.trainable = True
        
        # Train the discriminator on this mixed batch
        discriminator.train_on_batch(X_fake_vs_real, y1)
        
        
        #####################################
        ## TRAINING THE GENERATOR     ######
        ###################################
        
        # Create some fresh noise
        noise = tf.random.normal(shape=[batch_size, codings_size])
        
        # We want the discriminator to believe that fake images are real
        y2 = tf.constant([[1.]] * batch_size)
        
        # Avoids a Keras warning; only the generator updates in this phase
        discriminator.trainable = False
        
        GAN.train_on_batch(noise, y2)
        
print("TRAINING COMPLETE")            
Currently on Epoch 1
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 2
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 3
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 4
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 5
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 6
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 7
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 8
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 9
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 10
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 11
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 12
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 13
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 14
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 15
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 16
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 17
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 18
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 19
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
Currently on Epoch 20
	Currently on batch number 20 of 185
	Currently on batch number 40 of 185
	Currently on batch number 60 of 185
	Currently on batch number 80 of 185
	Currently on batch number 100 of 185
	Currently on batch number 120 of 185
	Currently on batch number 140 of 185
	Currently on batch number 160 of 185
	Currently on batch number 180 of 185
TRAINING COMPLETE
# Noise for 10 samples from the trained DCGAN generator.
noise = tf.random.normal(shape=[10, codings_size])
noise.shape
TensorShape([10, 100])
plt.imshow(noise)  # visualize the raw noise matrix itself
<matplotlib.image.AxesImage at 0x2049c9315f8>
images = generator(noise)
single_image = images[0]  # keep one sample for a separate plot below
# Display every generated sample as a 28x28 grayscale image.
for image in images:
    plt.imshow(image.numpy().reshape(28,28))
    plt.show()
plt.imshow(single_image.numpy().reshape(28,28))
<matplotlib.image.AxesImage at 0x204a1c3acc0>