Experiment 3.4
CO1: Identify and describe soft computing techniques and their roles in building intelligent
machines.
AIM:
Write a program to implement Generative Adversarial Networks (GANs) using genetic
algorithms.
Theory
Generative Adversarial Networks (GANs) are a class of machine learning frameworks
designed by Ian Goodfellow and his colleagues in 2014. GANs consist of two neural
networks: a generator and a discriminator. The generator creates fake data, while the
discriminator evaluates the authenticity of the data. Genetic algorithms (GAs) are
optimization techniques inspired by natural selection that evolve a population of candidate
solutions. In this experiment, the GA evolves a population of latent input vectors for the
generator, using the discriminator's score as the fitness function.
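The adversarial training can be summarised by the minimax objective from Goodfellow et al. (2014), in which the discriminator D is trained to maximise and the generator G to minimise the value function

\min_G \max_D V(D, G) = \mathbb{E}_{x \sim p_{data}(x)}[\log D(x)] + \mathbb{E}_{z \sim p_z(z)}[\log(1 - D(G(z)))]

where x is a real training sample and z is a latent noise vector fed to the generator.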
Procedure:
1. Install Anaconda:
Follow the same installation steps as provided in EXPERIMENT – 1.1.
2. Install the required libraries:
conda install tensorflow keras numpy matplotlib
pip install deap
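As a quick, optional sanity check (purely illustrative), the installed packages should import without errors from a Python prompt:
import tensorflow as tf
import numpy as np
import deap
print("TensorFlow:", tf.__version__)
print("NumPy:", np.__version__)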
3. Import the required libraries and define the discriminator network:
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Reshape, Flatten, Dropout, BatchNormalization, LeakyReLU
from tensorflow.keras.models import Sequential
from deap import base, creator, tools, algorithms
import random
import matplotlib.pyplot as plt
def build_discriminator(img_shape):
    # Discriminator: flattens a 28x28x1 image and outputs the probability that it is real
    model = Sequential([
        Flatten(input_shape=img_shape),
        Dense(512),
        LeakyReLU(alpha=0.2),
        Dropout(0.4),
        Dense(256),
        LeakyReLU(alpha=0.2),
        Dropout(0.4),
        Dense(1, activation='sigmoid')
    ])
    return model
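The function build_generator is called in a later step but its definition does not appear in the manual. The sketch below is one possible implementation; the layer sizes, BatchNormalization settings, and tanh output are assumptions chosen to match the imports above and the [-1, 1] data normalisation used later.
def build_generator(latent_dim):
    # Assumed architecture: latent vector -> dense layers -> 28x28x1 image in [-1, 1]
    model = Sequential([
        Dense(256, input_dim=latent_dim),
        LeakyReLU(alpha=0.2),
        BatchNormalization(momentum=0.8),
        Dense(512),
        LeakyReLU(alpha=0.2),
        BatchNormalization(momentum=0.8),
        Dense(28 * 28 * 1, activation='tanh'),
        Reshape((28, 28, 1))
    ])
    return model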
4. Load and preprocess the MNIST dataset:
(x_train, _), (_, _) = tf.keras.datasets.mnist.load_data()
x_train = (x_train.astype(np.float32) - 127.5) / 127.5  # Normalize to [-1, 1]
x_train = np.expand_dims(x_train, axis=3)  # Add a channel axis: (60000, 28, 28, 1)
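A quick check of the preprocessed array (illustrative) confirms that the data now matches the img_shape expected by the discriminator:
print(x_train.shape)                  # (60000, 28, 28, 1)
print(x_train.min(), x_train.max())   # approximately -1.0 and 1.0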
5. Build the generator and discriminator, and compile the models:
latent_dim = 100
img_shape = (28, 28, 1)
generator = build_generator(latent_dim)
discriminator = build_discriminator(img_shape)
discriminator.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Combined model: the generator feeds the (frozen) discriminator and is trained to fool it
z = tf.keras.Input(shape=(latent_dim,))
img = generator(z)
discriminator.trainable = False
validity = discriminator(img)
combined = tf.keras.Model(z, validity)
combined.compile(loss='binary_crossentropy', optimizer='adam')
6. Set up the genetic algorithm with DEAP and define the GAN training loop:
def evolve_population(population, toolbox, ngen=10, cxpb=0.5, mutpb=0.2):
    # Apply crossover and mutation, evaluate the offspring, and keep the fittest individuals
    for gen in range(ngen):
        offspring = algorithms.varAnd(population, toolbox, cxpb=cxpb, mutpb=mutpb)
        fits = toolbox.map(toolbox.evaluate, offspring)
        for fit, ind in zip(fits, offspring):
            ind.fitness.values = fit
        population = toolbox.select(offspring, k=len(population))
    return population

# Each individual is a latent vector; its fitness is the discriminator's "realness" score
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)

toolbox = base.Toolbox()
toolbox.register("attr_float", random.uniform, -1.0, 1.0)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, latent_dim)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)

def evaluate(individual):
    noise = np.array(individual).reshape(1, latent_dim)
    generated_image = generator.predict(noise, verbose=0)
    return float(discriminator.predict(generated_image, verbose=0)[0, 0]),

toolbox.register("evaluate", evaluate)
toolbox.register("mate", tools.cxBlend, alpha=0.5)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=0.2, indpb=0.2)
toolbox.register("select", tools.selTournament, tournsize=3)
def train(epochs, batch_size, population_size):
    # Reconstructed from the fragment in the manual; assumes x_train, generator,
    # discriminator, the combined model and the DEAP toolbox defined above
    population = toolbox.population(n=population_size)
    real_labels = np.ones((batch_size, 1))
    fake_labels = np.zeros((batch_size, 1))
    for epoch in range(epochs):
        idx = np.random.randint(0, x_train.shape[0], batch_size)
        real_images = x_train[idx]
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        fake_images = generator.predict(noise, verbose=0)
        d_loss_real = discriminator.train_on_batch(real_images, real_labels)
        d_loss_fake = discriminator.train_on_batch(fake_images, fake_labels)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
        g_loss = combined.train_on_batch(noise, real_labels)  # generator tries to fool the discriminator
        if epoch % 100 == 0:
            print(f"Epoch {epoch} [D loss: {d_loss[0]} | D accuracy: {100 * d_loss[1]}] [G loss: {g_loss}]")
    population = evolve_population(population, toolbox, ngen=generations)  # 'generations' is set below
    return population
7. Set the training hyperparameters:
epochs = 10000
batch_size = 64
population_size = 20
generations = 5
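With the hyperparameters in place, the training function reconstructed above (named train here; the original listing only shows a fragment of it) can be run, and the final evolved latent population retained:
population = train(epochs, batch_size, population_size)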
8. Plot the generated images:
def plot_generated_images(generator, examples=10, dim=(1, 10), figsize=(10, 1)):
    # Sample random latent vectors and display the corresponding generated digits
    noise = np.random.normal(0, 1, (examples, generator.input_shape[1]))
    generated_images = generator.predict(noise)
    plt.figure(figsize=figsize)
    for i in range(examples):
        plt.subplot(dim[0], dim[1], i + 1)
        plt.imshow(generated_images[i].reshape(28, 28), interpolation='nearest', cmap='gray')
        plt.axis('off')
    plt.tight_layout()
    plt.show()

plot_generated_images(generator)
Video Tutorial
Further Reading
Rolon-Mérette, D., Ross, M., Rolon-Mérette, T., & Church, K. (2016). Introduction to Anaconda and Python: Installation and setup. Python for research in psychology, 16(5), S5-S11.