import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops


def load_dataset():
    train_dataset = h5py.File('datasets/train_signs.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('datasets/test_signs.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    # reshape the labels into row vectors of shape (1, m)
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
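
# Usage sketch (a hypothetical call; assumes the SIGNS .h5 files sit under datasets/):
#   X_train_orig, Y_train, X_test_orig, Y_test, classes = load_dataset()
#   X_train_orig.shape  # (m_train, 64, 64, 3) for the SIGNS dataset
#   Y_train.shape       # (1, m_train)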


def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
    """
    Creates a list of random minibatches from (X, Y)

    Arguments:
    X -- input data, of shape (m, Hi, Wi, Ci)
    Y -- true "label" vector, of shape (m, n_y)
    mini_batch_size -- size of the mini-batches, integer
    seed -- this is only for grading purposes, so that your "random" minibatches are the same as ours

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """

    m = X.shape[0]  # number of training examples
    mini_batches = []
    np.random.seed(seed)

    # Step 1: Shuffle (X, Y)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[permutation, :, :, :]
    shuffled_Y = Y[permutation, :]

    # Step 2: Partition (shuffled_X, shuffled_Y), minus the end case.
    num_complete_minibatches = math.floor(m / mini_batch_size)  # number of mini-batches of size mini_batch_size in your partitioning
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[k * mini_batch_size : (k + 1) * mini_batch_size, :, :, :]
        mini_batch_Y = shuffled_Y[k * mini_batch_size : (k + 1) * mini_batch_size, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    # Handle the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m, :, :, :]
        mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    return mini_batches
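
# Usage sketch (hypothetical names; assumes X of shape (m, Hi, Wi, Ci) and Y of shape (m, n_y)):
#   minibatches = random_mini_batches(X_train, Y_train, mini_batch_size=64, seed=0)
#   for (minibatch_X, minibatch_Y) in minibatches:
#       pass  # run one optimizer step per minibatch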


def convert_to_one_hot(Y, C):
    # index the identity matrix with the labels, then transpose to shape (C, m)
    Y = np.eye(C)[Y.reshape(-1)].T
    return Y
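
# Worked example: labels [1, 0, 2] with C = 3 become columns of one-hot vectors,
# i.e. the result has shape (C, m) = (3, 3):
#   convert_to_one_hot(np.array([1, 0, 2]), 3)
#   -> array([[0., 1., 0.],
#             [1., 0., 0.],
#             [0., 0., 1.]])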


def forward_propagation_for_predict(X, parameters):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3",
                  with the shapes given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit
    """

    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']

    # Numpy equivalents:
    Z1 = tf.add(tf.matmul(W1, X), b1)   # Z1 = np.dot(W1, X) + b1
    A1 = tf.nn.relu(Z1)                 # A1 = relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)  # Z2 = np.dot(W2, A1) + b2
    A2 = tf.nn.relu(Z2)                 # A2 = relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)  # Z3 = np.dot(W3, A2) + b3

    return Z3
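
# Graph-construction sketch (TF1-style; the [12288, None] shape is an assumption
# matching flattened 64 x 64 x 3 inputs with an arbitrary batch size):
#   x = tf.placeholder("float", [12288, None])
#   z3 = forward_propagation_for_predict(x, params)  # symbolic tensor; evaluate inside a Session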


def predict(X, parameters):
    """
    Runs forward propagation for a single flattened image X of shape (12288, 1)
    and returns the index of the predicted class.
    """
    W1 = tf.convert_to_tensor(parameters["W1"])
    b1 = tf.convert_to_tensor(parameters["b1"])
    W2 = tf.convert_to_tensor(parameters["W2"])
    b2 = tf.convert_to_tensor(parameters["b2"])
    W3 = tf.convert_to_tensor(parameters["W3"])
    b3 = tf.convert_to_tensor(parameters["b3"])

    params = {"W1": W1,
              "b1": b1,
              "W2": W2,
              "b2": b2,
              "W3": W3,
              "b3": b3}

    # placeholder for one flattened 64 x 64 x 3 image (12288 features)
    x = tf.placeholder("float", [12288, 1])

    z3 = forward_propagation_for_predict(x, params)
    p = tf.argmax(z3)

    # a context manager ensures the session is closed after the run
    with tf.Session() as sess:
        prediction = sess.run(p, feed_dict={x: X})

    return prediction
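
# Usage sketch (hypothetical): classify one flattened, normalized test image.
#   image = (X_test_orig[0] / 255.).reshape(64 * 64 * 3, 1)  # column vector of 12288 features
#   predict(image, parameters)                               # index of the predicted class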