AI Document
https://colab.research.google.com/drive/10xJYhBvEtITo7rVZ_f6Id3pv4yJ1ShhD?usp=sharing
This code builds the model block by block using various neural-network components.
from keras.datasets import fashion_mnist
from keras.models import Sequential
from keras.layers import BatchNormalization
from keras.layers import Dense, Reshape, Flatten
from keras.layers import LeakyReLU
from tensorflow.keras.optimizers import Adam
import numpy as np

!mkdir generated_images

(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
assert x_train.shape == (60000, 28, 28)  # shape (format) of the data
assert x_test.shape == (10000, 28, 28)
assert y_train.shape == (60000,)
assert y_test.shape == (10000,)
2. We now create the variables for the neural network.
img_width = 28
img_height = 28
channels = 1
img_shape = (img_width, img_height, channels)
latent_dim = 100  # size of the noise vector; not captured in the notes, 100 is a common choice
3. Building Generator
def build_generator():
    model = Sequential()  # 99% of neural networks are sequential (a sequence of layers)
    model.add(Dense(np.prod(img_shape), activation='tanh', input_dim=latent_dim))
    model.add(Reshape(img_shape))
    model.summary()
    return model

generator = build_generator()
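As a quick sanity check (not in the original notes), the generator maps one latent noise vector to an image of shape img_shape:

noise = np.random.normal(0, 1, (1, latent_dim))   # one random latent vector
sample = generator.predict(noise)                 # untrained output looks like noise
print(sample.shape)                               # (1, 28, 28, 1) == (batch,) + img_shape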
4. Building Discriminator
def build_discriminator():
    model = Sequential()
    model.add(Flatten(input_shape=img_shape))  # 28x28 image -> 784 values
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(1, activation='sigmoid'))  # 0 = fake, 1 = real (squashes the output between 0 and 1)
    model.summary()
    return model

discriminator = build_discriminator()
The Flatten layer turns each 28x28 image into 784 inputs; model.summary() reports Total params: 533,505.
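As a side check (not in the notes), the untrained discriminator can already score an image; before training its output is uninformative, near 0.5:

test_img = np.random.rand(1, 28, 28, 1)      # one random "image"
print(discriminator.predict(test_img))       # probability that the image is real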
We have built the two neural networks; next we need to connect them together.
5) Connecting the networks to build the GAN (the generator's output shape must match the discriminator's input shape)
discriminator.compile(loss='binary_crossentropy', optimizer='adam')
discriminator.trainable = False  # freeze the discriminator inside the combined model (standard practice; not captured in the notes)
GAN = Sequential()
GAN.add(generator)        # noise -> image
GAN.add(discriminator)    # image -> real/fake probability
GAN.compile(loss='binary_crossentropy', optimizer='adam')

binary_crossentropy is the loss being minimized; 'adam' is the gradient-descent optimizer that minimizes it.
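To make the loss concrete, here is a small illustration (not from the notes) of what binary cross-entropy charges for a single prediction p against a label y:

def bce(y, p):
    # penalty for predicting probability p of "real" when the true label is y
    return -(y * np.log(p) + (1 - y) * np.log(1 - p))

print(bce(1, 0.9))   # real image scored 0.9 -> small loss (~0.105)
print(bce(1, 0.1))   # real image scored 0.1 -> large loss (~2.303)
print(bce(0, 0.1))   # fake image scored 0.1 -> small loss (~0.105)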
import PIL
import matplotlib.pyplot as plt

save_name = 0.00000000

def save_imgs(epoch):
    r, c = 5, 5
    noise = np.random.normal(0, 1, (r * c, latent_dim))
    gen_imgs = generator.predict(noise)
    global save_name
    save_name += 0.00000001
    print("%.8f" % save_name)
    gen_imgs = 0.5 * gen_imgs + 0.5  # rescale from [-1, 1] back to [0, 1]
    fig, axs = plt.subplots(r, c)
    cnt = 0
    for i in range(r):
        for j in range(c):
            axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
            # axs[i, j].imshow(gen_imgs[cnt])
            axs[i, j].axis('off')
            cnt += 1
    fig.savefig("generated_images/%.8f.png" % save_name)
    print('saved')
    plt.close()
(X_train, _), (_, _) = fashion_mnist.load_data()
print(X_train.shape)
# (60000, 28, 28) -> (images, width, height)
train(50)

Inside train(), random real images (imgs) are labeled valid and generated images are labeled fake:

gen_imgs = generator.predict(noise)
d_loss_real = discriminator.train_on_batch(imgs, valid)
d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)
d_loss = np.add(d_loss_real, d_loss_fake) * 0.5  # average of the two losses
if epoch % save_interval == 0:
    save_imgs(epoch)
# print(X_train.shape)

train(30000, batch_size=64, save_interval=200)
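The notes above only capture fragments of train(); a minimal sketch of the full function, assuming the usual labeling (valid = ones, fake = zeros) and rescaling to [-1, 1] for the tanh generator:

def train(epochs, batch_size=64, save_interval=200):
    X = X_train / 127.5 - 1.0                  # rescale to [-1, 1] (assumed)
    X = np.expand_dims(X, axis=3)              # (60000, 28, 28) -> (60000, 28, 28, 1)
    valid = np.ones((batch_size, 1))           # labels for real images
    fake = np.zeros((batch_size, 1))           # labels for generated images
    for epoch in range(epochs):
        # train the discriminator on one real batch and one generated batch
        idx = np.random.randint(0, X.shape[0], batch_size)
        imgs = X[idx]
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        gen_imgs = generator.predict(noise)
        d_loss_real = discriminator.train_on_batch(imgs, valid)
        d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)
        d_loss = np.add(d_loss_real, d_loss_fake) * 0.5
        # train the generator through the frozen discriminator:
        # it is rewarded when its images are classified as valid
        g_loss = GAN.train_on_batch(noise, valid)
        if epoch % save_interval == 0:
            print(epoch, d_loss, g_loss)
            save_imgs(epoch)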
https://colab.research.google.com/drive/114cbfMkf56RQRGEpO7FYmss-2_YKpENc?usp=sharing
from keras.datasets import cifar10, mnist
from keras.models import Sequential
from keras.layers import Reshape
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Conv2D
from keras.layers import Conv2DTranspose
from keras.layers import Dropout
from keras.layers import LeakyReLU
from tensorflow.keras.optimizers import Adam
import numpy as np
!mkdir generated_images
3) Building Generator
def build_generator():
    model = Sequential()
    # latent_dim is the noise size; 4x4x256 is the initial shape we reconstruct from
    # (it sets how many parameters feed the output shape)
    model.add(Dense(256 * 4 * 4, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))  # activation function
    model.add(Reshape((4, 4, 256)))
    # strided Conv2DTranspose (convolutional) layers upsample: 4x4 -> 8x8 -> 16x16 -> 32x32
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(3, (3, 3), activation='tanh', padding='same'))  # output layer (assumed): 32x32 RGB image
    return model

generator = build_generator()
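A quick shape check (not from the notes) confirms the upsampling chain ends at a 32x32 RGB image:

noise = np.random.normal(0, 1, (1, latent_dim))   # latent_dim as defined for the earlier model, e.g. 100
print(generator.predict(noise).shape)             # (1, 32, 32, 3)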
4) Building Discriminator
def build_discriminator():
    model = Sequential()
    model.add(Conv2D(128, (3, 3), padding='same', input_shape=(32, 32, 3)))  # CIFAR-10 images are 32x32x3
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(256, (3, 3), padding='same'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Flatten())
    # drop 40% of the neurons, keeping 60% to pass into the final layer
    # (increases stabilization, decreases overfitting)
    model.add(Dropout(0.4))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()
    return model

discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
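The notes end here; mirroring step 5 of the first section, connecting this DCGAN and preparing CIFAR-10 would follow the same pattern. A minimal sketch, assuming the same freeze-the-discriminator setup as before:

discriminator.trainable = False   # freeze while training the generator through the GAN
GAN = Sequential()
GAN.add(generator)                # noise -> 32x32x3 image
GAN.add(discriminator)            # image -> real/fake probability
GAN.compile(loss='binary_crossentropy', optimizer=Adam())

(X_train, _), (_, _) = cifar10.load_data()
X_train = X_train / 127.5 - 1.0   # rescale pixels to [-1, 1] to match the tanh output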