0% found this document useful (0 votes)
8 views

AM19_ADL_u-net-model

U-Net Model — Generative AI
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
8 views

AM19_ADL_u-net-model

U-Net Model — Generative AI
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 37

am19-adl-u-net-model

November 29, 2024

[ ]: !pip install -q tensorflow tensorflow-datasets

[ ]: import tensorflow as tf
from tensorflow.keras import layers,models
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt

# Download the Oxford-IIIT Pet dataset (images + trimap segmentation masks),
# keeping the DatasetInfo object for split sizes and metadata.
dataset, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True)

Downloading and preparing dataset 773.52 MiB (download: 773.52 MiB, generated:
774.69 MiB, total: 1.51 GiB) to
/root/tensorflow_datasets/oxford_iiit_pet/3.2.0…
Dl Completed…: 0 url [00:00, ? url/s]
Dl Size…: 0 MiB [00:00, ? MiB/s]
Extraction completed…: 0 file [00:00, ? file/s]
Generating splits…: 0%| | 0/2 [00:00<?, ? splits/s]
Generating train examples…: 0%| | 0/3680 [00:00<?, ? examples/s]
Shuffling /root/tensorflow_datasets/oxford_iiit_pet/incomplete.PER88Y_3.2.0/
↪oxford_iiit_pet-train.tfrecord*……

Generating test examples…: 0%| | 0/3669 [00:00<?, ? examples/s]


Shuffling /root/tensorflow_datasets/oxford_iiit_pet/incomplete.PER88Y_3.2.0/
↪oxford_iiit_pet-test.tfrecord*…:…

Dataset oxford_iiit_pet downloaded and prepared to


/root/tensorflow_datasets/oxford_iiit_pet/3.2.0. Subsequent calls will reuse
this data.

def normalize(input_image, input_mask):
    """Scale image pixels to [0, 1] and shift mask labels to start at 0.

    Args:
        input_image: image tensor with values in [0, 255].
        input_mask: integer segmentation mask; Oxford-IIIT Pet labels
            are {1, 2, 3} — TODO confirm against the dataset docs.

    Returns:
        Tuple of (float32 image in [0, 1], zero-based mask).
    """
    input_image = tf.cast(input_image, tf.float32) / 255.0
    input_mask -= 1  # shift labels {1, 2, 3} -> {0, 1, 2} for sparse CE loss
    return input_image, input_mask


def load_image_train(datapoint):
    """Resize a training example to 128x128 and normalize it.

    Fix: the mask is resized with nearest-neighbour interpolation. The
    original used the default bilinear method, which blends integer class
    labels into meaningless fractional values along region boundaries.
    """
    input_image = tf.image.resize(datapoint['image'], (128, 128))
    input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128),
                                 method='nearest')
    return normalize(input_image, input_mask)


def load_image_test(datapoint):
    """Resize a test example to 128x128 and normalize it (same as training)."""
    input_image = tf.image.resize(datapoint['image'], (128, 128))
    input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128),
                                 method='nearest')
    return normalize(input_image, input_mask)

# Input pipeline: map the preprocessing functions, then cache, shuffle,
# batch, and repeat the training stream; the test stream is only batched.
TRAIN_LENGTH = info.splits['train'].num_examples
BATCH_SIZE = 64
BUFFER_SIZE = 1000

train = dataset['train'].map(load_image_train, num_parallel_calls=tf.data.AUTOTUNE)
test = dataset['test'].map(load_image_test)

train_dataset = train.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
# Consistency fix: use tf.data.AUTOTUNE as in the map() call above;
# tf.data.experimental.AUTOTUNE is the deprecated alias of the same value.
train_dataset = train_dataset.prefetch(buffer_size=tf.data.AUTOTUNE)

test_dataset = test.batch(BATCH_SIZE)

# Render the reference U-Net architecture diagram stored alongside the
# notebook (Colab path).
from IPython.display import Image

Image('/content/unet.png')

2
def unet_model(output_channels):
    """Build a U-Net for semantic segmentation of 128x128 RGB images.

    The original body repeated the same Conv-BN-ReLU pair ten times and the
    same upsample-and-concatenate step four times; those are factored into
    two private helpers. The resulting graph is identical layer-for-layer.

    Args:
        output_channels: number of segmentation classes (softmax channels).

    Returns:
        An uncompiled tf.keras Model mapping (128, 128, 3) inputs to
        (128, 128, output_channels) per-pixel class probabilities.
    """

    def _double_conv(x, filters):
        # Standard U-Net conv block: two 3x3 same-padded convolutions,
        # each followed by batch norm and ReLU.
        for _ in range(2):
            x = layers.Conv2D(filters, (3, 3), padding='same')(x)
            x = layers.BatchNormalization()(x)
            x = layers.ReLU()(x)
        return x

    def _up_concat(x, skip, filters):
        # 2x2 transposed convolution doubles the spatial size, then the
        # encoder skip connection is concatenated on the channel axis.
        x = layers.Conv2DTranspose(filters, (2, 2), strides=(2, 2),
                                   padding='same')(x)
        return layers.concatenate([x, skip], axis=3)

    inputs = tf.keras.layers.Input(shape=[128, 128, 3])

    # Encoder: filters double while spatial resolution halves.
    conv1 = _double_conv(inputs, 64)
    pool1 = layers.MaxPooling2D((2, 2))(conv1)
    conv2 = _double_conv(pool1, 128)
    pool2 = layers.MaxPooling2D((2, 2))(conv2)
    conv3 = _double_conv(pool2, 256)
    pool3 = layers.MaxPooling2D((2, 2))(conv3)
    conv4 = _double_conv(pool3, 512)
    pool4 = layers.MaxPooling2D((2, 2))(conv4)

    # Bottleneck.
    conv5 = _double_conv(pool4, 1024)

    # Decoder: upsample and fuse the matching encoder features.
    conv6 = _double_conv(_up_concat(conv5, conv4, 512), 512)
    conv7 = _double_conv(_up_concat(conv6, conv3, 256), 256)
    conv8 = _double_conv(_up_concat(conv7, conv2, 128), 128)
    conv9 = _double_conv(_up_concat(conv8, conv1, 64), 64)

    # Per-pixel classification head (probabilities, not logits).
    outputs = layers.Conv2D(output_channels, (1, 1),
                            activation='softmax')(conv9)

    return models.Model(inputs=[inputs], outputs=[outputs])


# Three classes in the Oxford-IIIT trimap: pet, border, background.
model = unet_model(3)

# Integer mask labels against softmax probabilities -> sparse categorical
# cross-entropy (the string loss defaults to from_logits=False, matching
# the model's softmax head).
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

4
def display_image_and_mask(image, mask):
    """Show an input image and its ground-truth mask side by side.

    Args:
        image: image tensor accepted by tf.keras.utils.array_to_img.
        mask: single-channel segmentation mask tensor.
    """
    plt.figure(figsize=(15, 15))

    # Left panel: the input image.
    plt.subplot(1, 2, 1)
    plt.title('Input image')
    plt.imshow(tf.keras.utils.array_to_img(image))
    plt.axis('off')

    # Right panel: the ground-truth mask.
    plt.subplot(1, 2, 2)
    plt.title('Ground Truth mask')  # fixed typo: was 'Groud Truth mask'
    plt.imshow(tf.keras.utils.array_to_img(mask))
    plt.axis('off')

    plt.show()

def visualize_sample_from_dataset(dataset, num_samples=3):
    """Display the first `num_samples` (image, mask) pairs from one batch.

    Bug fix: the original looped over range(num_samples, len(image_batch)),
    which skipped the first `num_samples` items and displayed the *rest* of
    the batch (59 figures for num_samples=5 on a 64-element batch) instead
    of `num_samples` examples.

    Args:
        dataset: a batched tf.data.Dataset yielding (images, masks).
        num_samples: how many examples to display (capped at batch size).
    """
    for image_batch, mask_batch in dataset.take(1):  # a single batch
        for i in range(min(num_samples, len(image_batch))):
            display_image_and_mask(image_batch[i], mask_batch[i])

# Preview a few examples from the test split before training.
visualize_sample_from_dataset(test_dataset, num_samples=5)

5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
# Train for 20 epochs; 3680 train images // 64 = 57 steps per epoch,
# validating against the test split after each epoch.
EPOCHS = 20
STEPS_PER_EPOCH = TRAIN_LENGTH // BATCH_SIZE
model.fit(train_dataset,
          epochs=EPOCHS,
          steps_per_epoch=STEPS_PER_EPOCH,
          validation_data=test_dataset)

Epoch 1/20
57/57 �������������������� 186s 1s/step -
accuracy: 0.5362 - loss: 1.0047 - val_accuracy: 0.3279 - val_loss: 4.1948
Epoch 2/20
57/57 �������������������� 122s 1s/step -
accuracy: 0.7482 - loss: 0.6003 - val_accuracy: 0.3768 - val_loss: 3.9073

34
Epoch 3/20
57/57 �������������������� 64s 1s/step -
accuracy: 0.7797 - loss: 0.5313 - val_accuracy: 0.5626 - val_loss: 2.9311
Epoch 4/20
57/57 �������������������� 64s 1s/step -
accuracy: 0.7953 - loss: 0.4959 - val_accuracy: 0.4984 - val_loss: 2.1828
Epoch 5/20
57/57 �������������������� 64s 1s/step -
accuracy: 0.8187 - loss: 0.4387 - val_accuracy: 0.6877 - val_loss: 0.8231
Epoch 6/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8294 - loss: 0.4143 - val_accuracy: 0.6909 - val_loss: 1.0621
Epoch 7/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8405 - loss: 0.3860 - val_accuracy: 0.7763 - val_loss: 0.6371
Epoch 8/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8498 - loss: 0.3643 - val_accuracy: 0.8004 - val_loss: 0.5189
Epoch 9/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8610 - loss: 0.3372 - val_accuracy: 0.8430 - val_loss: 0.3994
Epoch 10/20
57/57 �������������������� 67s 1s/step -
accuracy: 0.8663 - loss: 0.3224 - val_accuracy: 0.7583 - val_loss: 0.8012
Epoch 11/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8672 - loss: 0.3209 - val_accuracy: 0.8625 - val_loss: 0.3514
Epoch 12/20
57/57 �������������������� 67s 1s/step -
accuracy: 0.8761 - loss: 0.2989 - val_accuracy: 0.8701 - val_loss: 0.3364
Epoch 13/20
57/57 �������������������� 66s 1s/step -
accuracy: 0.8806 - loss: 0.2869 - val_accuracy: 0.8239 - val_loss: 0.4868
Epoch 14/20
57/57 �������������������� 67s 1s/step -
accuracy: 0.8888 - loss: 0.2670 - val_accuracy: 0.8711 - val_loss: 0.3257
Epoch 15/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8868 - loss: 0.2703 - val_accuracy: 0.8615 - val_loss: 0.3705
Epoch 16/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8907 - loss: 0.2583 - val_accuracy: 0.8645 - val_loss: 0.3610
Epoch 17/20
57/57 �������������������� 67s 1s/step -
accuracy: 0.8971 - loss: 0.2446 - val_accuracy: 0.8541 - val_loss: 0.3744
Epoch 18/20
57/57 �������������������� 67s 1s/step -
accuracy: 0.8988 - loss: 0.2391 - val_accuracy: 0.8706 - val_loss: 0.3344

35
Epoch 19/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.9024 - loss: 0.2304 - val_accuracy: 0.8649 - val_loss: 0.3600
Epoch 20/20
57/57 �������������������� 67s 1s/step -
accuracy: 0.9038 - loss: 0.2263 - val_accuracy: 0.8750 - val_loss: 0.3326

[ ]: <keras.src.callbacks.history.History at 0x7956e7e624d0>

# Quantify final loss/accuracy on the held-out test split.
model.evaluate(test_dataset)

# Predicting masks on test images follows below.

def create_mask(pred_mask):
    """Collapse per-pixel class probabilities into a label map.

    Takes the argmax over the channel axis, restores a trailing channel
    dimension, and returns only the first image of the batch.
    """
    labels = tf.argmax(pred_mask, axis=-1)
    return tf.expand_dims(labels, axis=-1)[0]

def display(display_list):
    """Plot [input image, true mask, predicted mask] in one row."""
    plt.figure(figsize=(15, 15))
    titles = ['Input Image', 'True Mask', 'Predicted Mask']

    for idx, item in enumerate(display_list):
        plt.subplot(1, len(display_list), idx + 1)
        plt.title(titles[idx])
        plt.imshow(tf.keras.utils.array_to_img(item))
        plt.axis('off')
    plt.show()

def show_predictions(dataset=None, num=1):
    """Display model predictions next to the inputs and true masks.

    Args:
        dataset: optional batched (image, mask) dataset; when given, the
            first example of each of the first `num` batches is shown.
        num: number of examples to display.

    When no dataset is passed, examples are drawn from the global
    `test_dataset`.
    """
    if dataset:
        for image, mask in dataset.take(num):
            pred_mask = model.predict(image)
            display([image[0], mask[0], create_mask(pred_mask)])
    else:
        # Bug fix: the original called next(iter(test_dataset)) inside the
        # loop, re-creating the iterator every pass and therefore showing
        # the same first batch `num` times. Build the iterator once.
        batches = iter(test_dataset)
        for _ in range(num):
            sample_image, sample_mask = next(batches)
            display([sample_image[0], sample_mask[0],
                     create_mask(model.predict(sample_image))])

# Visualize one prediction from each of the first three test batches.
show_predictions(test_dataset, num=3)

58/58 �������������������� 20s 336ms/step -


accuracy: 0.8765 - loss: 0.3261
2/2 �������������������� 2s 218ms/step

36
2/2 �������������������� 0s 144ms/step

2/2 �������������������� 0s 148ms/step

37

You might also like