- from keras.datasets import cifar10  # importing the dataset from keras
- from keras.models import Sequential
- from keras.layers import Dense, Dropout, Activation, Flatten
- from keras.layers import Conv2D, MaxPooling2D
- from keras.callbacks import ModelCheckpoint, TensorBoard
- from keras.utils import to_categorical
+ from tensorflow.keras.models import Sequential
+ from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
+ from tensorflow.keras.layers import Conv2D, MaxPooling2D
+ from tensorflow.keras.callbacks import TensorBoard
+ import tensorflow as tf
+ import tensorflow_datasets as tfds
import os

# hyper-parameters
@@ -79,56 +79,76 @@ def create_model(input_shape):
    # print the summary of the model architecture
    model.summary()

-     # training the model using rmsprop optimizer
-     model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
+     # training the model using adam optimizer
+     model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    return model


def load_data():
    """
-     This function loads CIFAR-10 dataset, normalized, and labels one-hot encoded
+     This function loads the CIFAR-10 dataset and preprocesses it
    """
+     # Loading data using Keras
    # loading the CIFAR-10 dataset, split between train and test sets
-     (X_train, y_train), (X_test, y_test) = cifar10.load_data()
-     print("Training samples:", X_train.shape[0])
-     print("Testing samples:", X_test.shape[0])
-     print(f"Images shape: {X_train.shape[1:]}")
-
-     # converting image labels to binary class matrices
-     y_train = to_categorical(y_train, num_classes)
-     y_test = to_categorical(y_test, num_classes)
-
-     # convert to floats instead of int, so we can divide by 255
-     X_train = X_train.astype("float32")
-     X_test = X_test.astype("float32")
-     X_train /= 255
-     X_test /= 255
+     # (X_train, y_train), (X_test, y_test) = cifar10.load_data()
+     # print("Training samples:", X_train.shape[0])
+     # print("Testing samples:", X_test.shape[0])
+     # print(f"Images shape: {X_train.shape[1:]}")
+
+     # # converting image labels to binary class matrices
+     # y_train = to_categorical(y_train, num_classes)
+     # y_test = to_categorical(y_test, num_classes)
+
+     # # convert to floats instead of int, so we can divide by 255
+     # X_train = X_train.astype("float32")
+     # X_test = X_test.astype("float32")
+     # X_train /= 255
+     # X_test /= 255
+     # return (X_train, y_train), (X_test, y_test)
+     # Loading data using TensorFlow Datasets
+     def preprocess_image(image, label):
+         # convert [0, 255] range integers to [0, 1] range floats
+         image = tf.image.convert_image_dtype(image, tf.float32)
+         return image, label
+     # loading the CIFAR-10 dataset, split between train and test sets
+     ds_train, info = tfds.load("cifar10", with_info=True, split="train", as_supervised=True)
+     ds_test = tfds.load("cifar10", split="test", as_supervised=True)
+     # repeat dataset forever, shuffle, preprocess, split into batches
+     ds_train = ds_train.repeat().shuffle(1024).map(preprocess_image).batch(batch_size)
+     ds_test = ds_test.repeat().shuffle(1024).map(preprocess_image).batch(batch_size)
+     return ds_train, ds_test, info

-     return (X_train, y_train), (X_test, y_test)


if __name__ == "__main__":

    # load the data
-     (X_train, y_train), (X_test, y_test) = load_data()
+     ds_train, ds_test, info = load_data()
+     # (X_train, y_train), (X_test, y_test) = load_data()

    # constructs the model
-     model = create_model(input_shape=X_train.shape[1:])
+     # model = create_model(input_shape=X_train.shape[1:])
+     model = create_model(input_shape=info.features["image"].shape)

    # some nice callbacks
-     tensorboard = TensorBoard(log_dir="logs/cifar10-model-v1")
-     checkpoint = ModelCheckpoint("results/cifar10-loss-{val_loss:.2f}-acc-{val_acc:.2f}.h5",
-                                  save_best_only=True,
-                                  verbose=1)
+     logdir = os.path.join("logs", "cifar10-model-v1")
+     tensorboard = TensorBoard(log_dir=logdir)

    # make sure the results folder exists
    if not os.path.isdir("results"):
        os.mkdir("results")

    # train
-     model.fit(X_train, y_train,
-               batch_size=batch_size,
-               epochs=epochs,
-               validation_data=(X_test, y_test),
-               callbacks=[tensorboard, checkpoint],
-               shuffle=True)
+     # model.fit(X_train, y_train,
+     #           batch_size=batch_size,
+     #           epochs=epochs,
+     #           validation_data=(X_test, y_test),
+     #           callbacks=[tensorboard, checkpoint],
+     #           shuffle=True)
+     model.fit(ds_train, epochs=epochs, validation_data=ds_test, verbose=1,
+               steps_per_epoch=info.splits["train"].num_examples // batch_size,
+               validation_steps=info.splits["test"].num_examples // batch_size,
+               callbacks=[tensorboard])
+
+     # save the model to disk
+     model.save("results/cifar10-model-v1.h5")
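
For reference, a minimal sketch of how the saved model could be loaded back and checked against the tfds test split; the batch_size value and the results/cifar10-model-v1.h5 path are assumptions carried over from the script above, not part of the commit.

import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds

batch_size = 64  # assumed to match the training hyper-parameter

# load the trained model saved by the script above (assumed path)
model = tf.keras.models.load_model("results/cifar10-model-v1.h5")

# rebuild the test pipeline with the same preprocessing (scale pixels to [0, 1])
ds_test = tfds.load("cifar10", split="test", as_supervised=True)
ds_test = ds_test.map(
    lambda image, label: (tf.image.convert_image_dtype(image, tf.float32), label)
).batch(batch_size)

# evaluate overall loss/accuracy on the finite (non-repeated) test set
loss, accuracy = model.evaluate(ds_test)
print("Test accuracy:", accuracy)

# predict the classes of a single batch of test images
images, labels = next(iter(ds_test))
predictions = np.argmax(model.predict(images), axis=1)
print("First 10 predictions:", predictions[:10].tolist())
print("First 10 true labels:", labels.numpy()[:10].tolist())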