AM19_ADL_u-net-model
[ ]: import tensorflow as tf
from tensorflow.keras import layers,models
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
Downloading and preparing dataset 773.52 MiB (download: 773.52 MiB, generated:
774.69 MiB, total: 1.51 GiB) to
/root/tensorflow_datasets/oxford_iiit_pet/3.2.0…
Dl Completed…: 0 url [00:00, ? url/s]
Dl Size…: 0 MiB [00:00, ? MiB/s]
Extraction completed…: 0 file [00:00, ? file/s]
Generating splits…: 0%| | 0/2 [00:00<?, ? splits/s]
Generating train examples…: 0%| | 0/3680 [00:00<?, ? examples/s]
Shuffling /root/tensorflow_datasets/oxford_iiit_pet/incomplete.PER88Y_3.2.0/
↪oxford_iiit_pet-train.tfrecord*……
def load_image_train(datapoint):
    """Preprocess one training example: resize to 128x128 and normalize.

    Args:
        datapoint: a TFDS example dict with 'image' and 'segmentation_mask'.

    Returns:
        (input_image, input_mask) tuple after resizing and normalization.
    """
    input_image = tf.image.resize(datapoint['image'], (128, 128))
    input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
    # `normalize` is defined in another cell of this notebook — presumably it
    # scales the image to [0, 1] and shifts mask labels; verify its definition.
    input_image, input_mask = normalize(input_image, input_mask)
    return input_image, input_mask
def load_image_test(datapoint):
    """Preprocess one test example: resize to 128x128 and normalize.

    Args:
        datapoint: a TFDS example dict with 'image' and 'segmentation_mask'.

    Returns:
        (image, mask) tuple after resizing and normalization.
    """
    resized_image = tf.image.resize(datapoint['image'], (128, 128))
    resized_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
    return normalize(resized_image, resized_mask)
# Build the input pipelines.
# NOTE(review): `dataset` and `info` come from an earlier
# `tfds.load('oxford_iiit_pet', with_info=True)` cell not shown here — confirm.
TRAIN_LENGTH = info.splits['train'].num_examples
BATCH_SIZE = 64
BUFFER_SIZE = 1000

train = dataset['train'].map(load_image_train, num_parallel_calls=tf.data.AUTOTUNE)
test = dataset['test'].map(load_image_test)

# Cache decoded examples, shuffle, batch, and repeat indefinitely so training
# can run for a fixed number of steps per epoch.
train_dataset = train.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
# Use tf.data.AUTOTUNE for consistency with the map() call above
# (tf.data.experimental.AUTOTUNE is the deprecated alias).
train_dataset = train_dataset.prefetch(buffer_size=tf.data.AUTOTUNE)
test_dataset = test.batch(BATCH_SIZE)
2
def unet_model(output_channels):
    """Build a U-Net for semantic segmentation of 128x128 RGB images.

    Args:
        output_channels: number of segmentation classes (softmax channels
            of the final 1x1 convolution).

    Returns:
        A tf.keras Model mapping (128, 128, 3) inputs to per-pixel class
        probabilities of shape (128, 128, output_channels).
    """

    def _double_conv(x, filters):
        # Two 3x3 same-padding convolutions, each followed by BatchNorm + ReLU.
        for _ in range(2):
            x = layers.Conv2D(filters, (3, 3), padding='same')(x)
            x = layers.BatchNormalization()(x)
            x = layers.ReLU()(x)
        return x

    def _up_block(x, skip, filters):
        # 2x2 transposed-conv upsample, concatenate the encoder skip tensor
        # along channels, then apply the standard double-conv stack.
        x = layers.Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(x)
        x = layers.concatenate([x, skip], axis=3)
        return _double_conv(x, filters)

    inputs = tf.keras.layers.Input(shape=[128, 128, 3])

    # Encoder (downsampling path): doubling filters, halving resolution.
    conv1 = _double_conv(inputs, 64)
    pool1 = layers.MaxPooling2D((2, 2))(conv1)
    conv2 = _double_conv(pool1, 128)
    pool2 = layers.MaxPooling2D((2, 2))(conv2)
    conv3 = _double_conv(pool2, 256)
    pool3 = layers.MaxPooling2D((2, 2))(conv3)
    conv4 = _double_conv(pool3, 512)
    pool4 = layers.MaxPooling2D((2, 2))(conv4)

    # Bottleneck.
    conv5 = _double_conv(pool4, 1024)

    # Decoder (upsampling path) with skip connections from the encoder.
    conv6 = _up_block(conv5, conv4, 512)
    conv7 = _up_block(conv6, conv3, 256)
    conv8 = _up_block(conv7, conv2, 128)
    conv9 = _up_block(conv8, conv1, 64)

    # Per-pixel class probabilities via a 1x1 softmax convolution.
    outputs = layers.Conv2D(output_channels, (1, 1), activation='softmax')(conv9)
    return models.Model(inputs=[inputs], outputs=[outputs])


# Oxford-IIIT Pet masks have 3 classes (pet / border / background).
model = unet_model(3)
# Sparse categorical cross-entropy expects integer class labels per pixel,
# matching the model's softmax output over `output_channels` classes.
# NOTE(review): assumes `normalize` (defined elsewhere) shifted the
# Oxford-IIIT Pet mask labels {1,2,3} down to {0,1,2} — confirm.
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
4
def display_image_and_mask(image, mask):
    """Show an input image and its segmentation mask side by side.

    Args:
        image: image tensor/array accepted by tf.keras.utils.array_to_img.
        mask: segmentation mask of the same spatial size.
    """
    plt.figure(figsize=(15, 15))
    # Input image in the left slot.
    plt.subplot(1, 2, 1)
    plt.title('Input image')
    plt.imshow(tf.keras.utils.array_to_img(image))
    plt.axis('off')
    # Bug fix: the original accepted `mask` but never drew it, even though
    # the 1x2 subplot grid reserved a slot for it.
    plt.subplot(1, 2, 2)
    plt.title('Segmentation mask')
    plt.imshow(tf.keras.utils.array_to_img(mask))
    plt.axis('off')
    plt.show()
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
EPOCHS = 20
# One pass over the (repeated) training set per epoch; required because
# train_dataset uses .repeat() and is otherwise infinite.
STEPS_PER_EPOCH = TRAIN_LENGTH // BATCH_SIZE
model.fit(
    train_dataset,
    epochs=EPOCHS,
    steps_per_epoch=STEPS_PER_EPOCH,
    validation_data=test_dataset,
)
Epoch 1/20
57/57 �������������������� 186s 1s/step -
accuracy: 0.5362 - loss: 1.0047 - val_accuracy: 0.3279 - val_loss: 4.1948
Epoch 2/20
57/57 �������������������� 122s 1s/step -
accuracy: 0.7482 - loss: 0.6003 - val_accuracy: 0.3768 - val_loss: 3.9073
34
Epoch 3/20
57/57 �������������������� 64s 1s/step -
accuracy: 0.7797 - loss: 0.5313 - val_accuracy: 0.5626 - val_loss: 2.9311
Epoch 4/20
57/57 �������������������� 64s 1s/step -
accuracy: 0.7953 - loss: 0.4959 - val_accuracy: 0.4984 - val_loss: 2.1828
Epoch 5/20
57/57 �������������������� 64s 1s/step -
accuracy: 0.8187 - loss: 0.4387 - val_accuracy: 0.6877 - val_loss: 0.8231
Epoch 6/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8294 - loss: 0.4143 - val_accuracy: 0.6909 - val_loss: 1.0621
Epoch 7/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8405 - loss: 0.3860 - val_accuracy: 0.7763 - val_loss: 0.6371
Epoch 8/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8498 - loss: 0.3643 - val_accuracy: 0.8004 - val_loss: 0.5189
Epoch 9/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8610 - loss: 0.3372 - val_accuracy: 0.8430 - val_loss: 0.3994
Epoch 10/20
57/57 �������������������� 67s 1s/step -
accuracy: 0.8663 - loss: 0.3224 - val_accuracy: 0.7583 - val_loss: 0.8012
Epoch 11/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8672 - loss: 0.3209 - val_accuracy: 0.8625 - val_loss: 0.3514
Epoch 12/20
57/57 �������������������� 67s 1s/step -
accuracy: 0.8761 - loss: 0.2989 - val_accuracy: 0.8701 - val_loss: 0.3364
Epoch 13/20
57/57 �������������������� 66s 1s/step -
accuracy: 0.8806 - loss: 0.2869 - val_accuracy: 0.8239 - val_loss: 0.4868
Epoch 14/20
57/57 �������������������� 67s 1s/step -
accuracy: 0.8888 - loss: 0.2670 - val_accuracy: 0.8711 - val_loss: 0.3257
Epoch 15/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8868 - loss: 0.2703 - val_accuracy: 0.8615 - val_loss: 0.3705
Epoch 16/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.8907 - loss: 0.2583 - val_accuracy: 0.8645 - val_loss: 0.3610
Epoch 17/20
57/57 �������������������� 67s 1s/step -
accuracy: 0.8971 - loss: 0.2446 - val_accuracy: 0.8541 - val_loss: 0.3744
Epoch 18/20
57/57 �������������������� 67s 1s/step -
accuracy: 0.8988 - loss: 0.2391 - val_accuracy: 0.8706 - val_loss: 0.3344
35
Epoch 19/20
57/57 �������������������� 65s 1s/step -
accuracy: 0.9024 - loss: 0.2304 - val_accuracy: 0.8649 - val_loss: 0.3600
Epoch 20/20
57/57 �������������������� 67s 1s/step -
accuracy: 0.9038 - loss: 0.2263 - val_accuracy: 0.8750 - val_loss: 0.3326
[ ]: <keras.src.callbacks.history.History at 0x7956e7e624d0>
def display(display_list, titles=('Input Image', 'True Mask', 'Predicted Mask')):
    """Plot the images/masks in `display_list` side by side.

    Args:
        display_list: sequence of image-like arrays/tensors accepted by
            tf.keras.utils.array_to_img.
        titles: per-column titles. Bug fix: the original read a global
            `title` that is never defined in this notebook; the usual
            image/true-mask/predicted-mask trio is now the default.
    """
    plt.figure(figsize=(15, 15))
    for i, item in enumerate(display_list):
        plt.subplot(1, len(display_list), i + 1)
        plt.title(titles[i])
        plt.imshow(tf.keras.utils.array_to_img(item))
        plt.axis('off')
    plt.show()
36
2/2 �������������������� 0s 144ms/step
37