Commit 13b7d4c

Deep Neural Network Application
Cat classifier Week 4 - Course 1
1 parent 9d0ef62 commit 13b7d4c

30 files changed: +3458 −0 lines

Week4/Building your Deep Neural Network - Step by Step/Building+your+Deep+Neural+Network+-+Step+by+Step+v5.ipynb

Lines changed: 1523 additions & 0 deletions
Large diffs are not rendered by default.
Lines changed: 82 additions & 0 deletions
@@ -0,0 +1,82 @@
import numpy as np


def sigmoid(Z):
    """
    Implements the sigmoid activation in numpy

    Arguments:
    Z -- numpy array of any shape

    Returns:
    A -- output of sigmoid(Z), same shape as Z
    cache -- returns Z as well, useful during backpropagation
    """

    A = 1 / (1 + np.exp(-Z))
    cache = Z

    return A, cache


def relu(Z):
    """
    Implement the RELU function.

    Arguments:
    Z -- Output of the linear layer, of any shape

    Returns:
    A -- Post-activation parameter, of the same shape as Z
    cache -- Z, stored for computing the backward pass efficiently
    """

    A = np.maximum(0, Z)

    assert (A.shape == Z.shape)

    cache = Z
    return A, cache


def relu_backward(dA, cache):
    """
    Implement the backward propagation for a single RELU unit.

    Arguments:
    dA -- post-activation gradient, of any shape
    cache -- 'Z' where we store for computing backward propagation efficiently

    Returns:
    dZ -- Gradient of the cost with respect to Z
    """

    Z = cache
    dZ = np.array(dA, copy=True)  # just converting dZ to a correct object

    # When Z <= 0, set dZ to 0 as well.
    dZ[Z <= 0] = 0

    assert (dZ.shape == Z.shape)

    return dZ


def sigmoid_backward(dA, cache):
    """
    Implement the backward propagation for a single SIGMOID unit.

    Arguments:
    dA -- post-activation gradient, of any shape
    cache -- 'Z' where we store for computing backward propagation efficiently

    Returns:
    dZ -- Gradient of the cost with respect to Z
    """

    Z = cache

    s = 1 / (1 + np.exp(-Z))
    dZ = dA * s * (1 - s)

    assert (dZ.shape == Z.shape)

    return dZ
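
A quick way to check these helpers is to run a tiny array through the forward and backward passes. This is only a minimal sketch: it assumes the four functions above are available in the current session, and the values in the comments follow from sigmoid(-1) ≈ 0.269 and sigmoid(2) ≈ 0.881.

import numpy as np

# Sanity check, assuming sigmoid, relu, sigmoid_backward and relu_backward
# from the file above are defined in this session.
Z = np.array([[-1.0, 0.0, 2.0]])

A_sig, cache_sig = sigmoid(Z)    # values in (0, 1): ~[[0.269, 0.5, 0.881]]
A_relu, cache_relu = relu(Z)     # negatives clipped to 0: [[0., 0., 2.]]

dA = np.ones_like(Z)             # pretend upstream gradient of ones
dZ_sig = sigmoid_backward(dA, cache_sig)   # elementwise s * (1 - s)
dZ_relu = relu_backward(dA, cache_relu)    # [[0., 0., 1.]]: 1 where Z > 0, else 0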
Lines changed: 187 additions & 0 deletions
@@ -0,0 +1,187 @@
import numpy as np


def linear_forward_test_case():
    np.random.seed(1)
    """
    X = np.array([[-1.02387576, 1.12397796],
                  [-1.62328545, 0.64667545],
                  [-1.74314104, -0.59664964]])
    W = np.array([[ 0.74505627, 1.97611078, -1.24412333]])
    b = np.array([[1]])
    """
    A = np.random.randn(3, 2)
    W = np.random.randn(1, 3)
    b = np.random.randn(1, 1)

    return A, W, b


def linear_activation_forward_test_case():
    """
    X = np.array([[-1.02387576, 1.12397796],
                  [-1.62328545, 0.64667545],
                  [-1.74314104, -0.59664964]])
    W = np.array([[ 0.74505627, 1.97611078, -1.24412333]])
    b = 5
    """
    np.random.seed(2)
    A_prev = np.random.randn(3, 2)
    W = np.random.randn(1, 3)
    b = np.random.randn(1, 1)
    return A_prev, W, b


def L_model_forward_test_case():
    """
    X = np.array([[-1.02387576, 1.12397796],
                  [-1.62328545, 0.64667545],
                  [-1.74314104, -0.59664964]])
    parameters = {'W1': np.array([[ 1.62434536, -0.61175641, -0.52817175],
                                  [-1.07296862, 0.86540763, -2.3015387 ]]),
                  'W2': np.array([[ 1.74481176, -0.7612069 ]]),
                  'b1': np.array([[ 0.],
                                  [ 0.]]),
                  'b2': np.array([[ 0.]])}
    """
    np.random.seed(1)
    X = np.random.randn(4, 2)
    W1 = np.random.randn(3, 4)
    b1 = np.random.randn(3, 1)
    W2 = np.random.randn(1, 3)
    b2 = np.random.randn(1, 1)
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}

    return X, parameters


def compute_cost_test_case():
    Y = np.asarray([[1, 1, 1]])
    aL = np.array([[.8, .9, 0.4]])

    return Y, aL


def linear_backward_test_case():
    """
    z, linear_cache = (np.array([[-0.8019545 , 3.85763489]]),
                       (np.array([[-1.02387576, 1.12397796],
                                  [-1.62328545, 0.64667545],
                                  [-1.74314104, -0.59664964]]),
                        np.array([[ 0.74505627, 1.97611078, -1.24412333]]),
                        np.array([[1]])))
    """
    np.random.seed(1)
    dZ = np.random.randn(1, 2)
    A = np.random.randn(3, 2)
    W = np.random.randn(1, 3)
    b = np.random.randn(1, 1)
    linear_cache = (A, W, b)
    return dZ, linear_cache


def linear_activation_backward_test_case():
    """
    aL, linear_activation_cache = (np.array([[ 3.1980455 , 7.85763489]]),
                                   ((np.array([[-1.02387576, 1.12397796],
                                               [-1.62328545, 0.64667545],
                                               [-1.74314104, -0.59664964]]),
                                     np.array([[ 0.74505627, 1.97611078, -1.24412333]]),
                                     5),
                                    np.array([[ 3.1980455 , 7.85763489]])))
    """
    np.random.seed(2)
    dA = np.random.randn(1, 2)
    A = np.random.randn(3, 2)
    W = np.random.randn(1, 3)
    b = np.random.randn(1, 1)
    Z = np.random.randn(1, 2)
    linear_cache = (A, W, b)
    activation_cache = Z
    linear_activation_cache = (linear_cache, activation_cache)

    return dA, linear_activation_cache


def L_model_backward_test_case():
    """
    X = np.random.rand(3,2)
    Y = np.array([[1, 1]])
    parameters = {'W1': np.array([[ 1.78862847, 0.43650985, 0.09649747]]), 'b1': np.array([[ 0.]])}

    aL, caches = (np.array([[ 0.60298372, 0.87182628]]),
                  [((np.array([[ 0.20445225, 0.87811744],
                               [ 0.02738759, 0.67046751],
                               [ 0.4173048 , 0.55868983]]),
                     np.array([[ 1.78862847, 0.43650985, 0.09649747]]),
                     np.array([[ 0.]])),
                    np.array([[ 0.41791293, 1.91720367]]))])
    """
    np.random.seed(3)
    AL = np.random.randn(1, 2)
    Y = np.array([[1, 0]])

    A1 = np.random.randn(4, 2)
    W1 = np.random.randn(3, 4)
    b1 = np.random.randn(3, 1)
    Z1 = np.random.randn(3, 2)
    linear_cache_activation_1 = ((A1, W1, b1), Z1)

    A2 = np.random.randn(3, 2)
    W2 = np.random.randn(1, 3)
    b2 = np.random.randn(1, 1)
    Z2 = np.random.randn(1, 2)
    linear_cache_activation_2 = ((A2, W2, b2), Z2)

    caches = (linear_cache_activation_1, linear_cache_activation_2)

    return AL, Y, caches


def update_parameters_test_case():
    """
    parameters = {'W1': np.array([[ 1.78862847, 0.43650985, 0.09649747],
                                  [-1.8634927 , -0.2773882 , -0.35475898],
                                  [-0.08274148, -0.62700068, -0.04381817],
                                  [-0.47721803, -1.31386475, 0.88462238]]),
                  'W2': np.array([[ 0.88131804, 1.70957306, 0.05003364, -0.40467741],
                                  [-0.54535995, -1.54647732, 0.98236743, -1.10106763],
                                  [-1.18504653, -0.2056499 , 1.48614836, 0.23671627]]),
                  'W3': np.array([[-1.02378514, -0.7129932 , 0.62524497],
                                  [-0.16051336, -0.76883635, -0.23003072]]),
                  'b1': np.array([[ 0.],
                                  [ 0.],
                                  [ 0.],
                                  [ 0.]]),
                  'b2': np.array([[ 0.],
                                  [ 0.],
                                  [ 0.]]),
                  'b3': np.array([[ 0.],
                                  [ 0.]])}
    grads = {'dW1': np.array([[ 0.63070583, 0.66482653, 0.18308507],
                              [ 0. , 0. , 0. ],
                              [ 0. , 0. , 0. ],
                              [ 0. , 0. , 0. ]]),
             'dW2': np.array([[ 1.62934255, 0. , 0. , 0. ],
                              [ 0. , 0. , 0. , 0. ],
                              [ 0. , 0. , 0. , 0. ]]),
             'dW3': np.array([[-1.40260776, 0. , 0. ]]),
             'da1': np.array([[ 0.70760786, 0.65063504],
                              [ 0.17268975, 0.15878569],
                              [ 0.03817582, 0.03510211]]),
             'da2': np.array([[ 0.39561478, 0.36376198],
                              [ 0.7674101 , 0.70562233],
                              [ 0.0224596 , 0.02065127],
                              [-0.18165561, -0.16702967]]),
             'da3': np.array([[ 0.44888991, 0.41274769],
                              [ 0.31261975, 0.28744927],
                              [-0.27414557, -0.25207283]]),
             'db1': 0.75937676204411464,
             'db2': 0.86163759922811056,
             'db3': -0.84161956022334572}
    """
    np.random.seed(2)
    W1 = np.random.randn(3, 4)
    b1 = np.random.randn(3, 1)
    W2 = np.random.randn(1, 3)
    b2 = np.random.randn(1, 1)
    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}
    np.random.seed(3)
    dW1 = np.random.randn(3, 4)
    db1 = np.random.randn(3, 1)
    dW2 = np.random.randn(1, 3)
    db2 = np.random.randn(1, 1)
    grads = {"dW1": dW1,
             "db1": db1,
             "dW2": dW2,
             "db2": db2}

    return parameters, grads
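
These fixtures are meant to be passed to the notebook's own forward and backward functions. As a rough illustration only, the sketch below feeds linear_activation_forward_test_case() into a simplified stand-in (not the assignment's implementation) that reuses the sigmoid helper from the first file; it assumes both files' functions are available in the same session.

import numpy as np

def linear_activation_forward(A_prev, W, b):
    # Simplified stand-in: linear step Z = W A_prev + b, then a sigmoid activation.
    Z = np.dot(W, A_prev) + b
    A, activation_cache = sigmoid(Z)           # sigmoid from the helper file above
    return A, ((A_prev, W, b), activation_cache)

A_prev, W, b = linear_activation_forward_test_case()   # seeded, so reproducible
A, cache = linear_activation_forward(A_prev, W, b)
print(A.shape)   # (1, 2): one output unit, two examples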
