
Commit 9d0ef62

Planar data classification with a hidden layer
Week 2 Jupyter Notebook classification problem
1 parent d6d2605 commit 9d0ef62

13 files changed: +3039 -80 lines

Week2/Logistic Regression with a Neural Network mindset/.ipynb_checkpoints/Logistic+Regression+with+a+Neural+Network+mindset+v3-checkpoint.ipynb

Lines changed: 1251 additions & 0 deletions
Large diffs are not rendered by default.

Week2/Logistic Regression with a Neural Network mindset/Logistic+Regression+with+a+Neural+Network+mindset+v3.ipynb

Lines changed: 21 additions & 73 deletions
@@ -35,21 +35,8 @@
 {
 "cell_type": "code",
 "execution_count": 1,
-"metadata": {
-"collapsed": false
-},
-"outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"/opt/conda/lib/python3.5/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.\n",
-" warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')\n",
-"/opt/conda/lib/python3.5/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.\n",
-" warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')\n"
-]
-}
-],
+"metadata": {},
+"outputs": [],
 "source": [
 "import numpy as np\n",
 "import matplotlib.pyplot as plt\n",
@@ -83,9 +70,7 @@
 {
 "cell_type": "code",
 "execution_count": 2,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "# Loading the data (cat/non-cat)\n",
@@ -104,9 +89,7 @@
 {
 "cell_type": "code",
 "execution_count": 8,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
 {
 "name": "stdout",
@@ -150,7 +133,6 @@
 "cell_type": "code",
 "execution_count": 9,
 "metadata": {
-"collapsed": false,
 "scrolled": true
 },
 "outputs": [
@@ -227,9 +209,7 @@
 {
 "cell_type": "code",
 "execution_count": 32,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
 {
 "name": "stdout",
@@ -304,9 +284,7 @@
 {
 "cell_type": "code",
 "execution_count": 34,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "train_set_x = train_set_x_flatten/255.\n",
@@ -380,9 +358,7 @@
 {
 "cell_type": "code",
 "execution_count": 35,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "# GRADED FUNCTION: sigmoid\n",
@@ -409,7 +385,6 @@
 "cell_type": "code",
 "execution_count": 36,
 "metadata": {
-"collapsed": false,
 "scrolled": true
 },
 "outputs": [
@@ -451,9 +426,7 @@
 {
 "cell_type": "code",
 "execution_count": 45,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "# GRADED FUNCTION: initialize_with_zeros\n",
@@ -484,9 +457,7 @@
 {
 "cell_type": "code",
 "execution_count": 46,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
 {
 "name": "stdout",
@@ -553,9 +524,7 @@
 {
 "cell_type": "code",
 "execution_count": 72,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "# GRADED FUNCTION: propagate\n",
@@ -607,9 +576,7 @@
 {
 "cell_type": "code",
 "execution_count": 73,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
 {
 "name": "stdout",
@@ -669,9 +636,7 @@
 {
 "cell_type": "code",
 "execution_count": 91,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "# GRADED FUNCTION: optimize\n",
@@ -740,9 +705,7 @@
 {
 "cell_type": "code",
 "execution_count": 92,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
 {
 "name": "stdout",
@@ -811,9 +774,7 @@
 {
 "cell_type": "code",
 "execution_count": 95,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "# GRADED FUNCTION: predict\n",
@@ -858,9 +819,7 @@
 {
 "cell_type": "code",
 "execution_count": 96,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
 {
 "name": "stdout",
@@ -926,9 +885,7 @@
 {
 "cell_type": "code",
 "execution_count": 106,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "# GRADED FUNCTION: model\n",
@@ -994,9 +951,7 @@
 {
 "cell_type": "code",
 "execution_count": 107,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
 {
 "name": "stdout",
@@ -1047,9 +1002,7 @@
 {
 "cell_type": "code",
 "execution_count": 160,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
 {
 "name": "stdout",
@@ -1109,9 +1062,7 @@
 {
 "cell_type": "code",
 "execution_count": 109,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
 {
 "data": {
@@ -1168,9 +1119,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "learning_rates = [0.01, 0.001, 0.0001]\n",
@@ -1224,7 +1173,6 @@
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
-"collapsed": false,
 "scrolled": false
 },
 "outputs": [],
@@ -1295,7 +1243,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.5.2"
+"version": "3.5.3"
 }
 },
 "nbformat": 4,

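Apart from the final kernel-version bump (3.5.2 to 3.5.3), the hunks above all do the same cleanup: they drop the legacy "collapsed": false flag from code-cell metadata, and the first hunk also clears a cached matplotlib font-cache warning out of that cell's outputs. Below is a minimal sketch of doing the same cleanup in one pass with the standard library; the filename and the choice of plain json over nbformat are illustrative assumptions, not part of this commit.

import json

path = "Logistic+Regression+with+a+Neural+Network+mindset+v3.ipynb"  # assumed local copy
with open(path) as f:
    nb = json.load(f)

for cell in nb["cells"]:
    if cell.get("cell_type") == "code":
        # Drop the legacy per-cell "collapsed" flag, as the diff above does by hand.
        cell.get("metadata", {}).pop("collapsed", None)

with open(path, "w") as f:
    json.dump(nb, f, indent=1)
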
Week2/Logistic Regression with a Neural Network mindset/Logistic_Regression_NN.py

Lines changed: 2 additions & 6 deletions
@@ -691,14 +691,10 @@ def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate
 num_d = d["Y_prediction_test"]
 num_y = test_set_y
 true_y_d = (num_d == num_y)
-for i in true_y_d:
-    if i == "False":
-        print(classes[d["Y_prediction_test"][0,index]].decode("utf-8"))
-        print(classes[np.squeeze(test_set_y[:, index])].decode("utf-8"))
+print(classes[d["Y_prediction_test"][0,index]].decode("utf-8"))
+print(classes[np.squeeze(test_set_y[:, index])].decode("utf-8"))
 
 plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
-print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.")
-
 
 # Let's also plot the cost function and the gradients.
 
Week2/Logistic Regression with a Neural Network mindset/lr_utils.py

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@
 
 
 def load_dataset():
-    train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
+    train_dataset = h5py.File('datasets/train_catvnoncat.h5', 'r')
     train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels

Week3/Planar data classification.pdf

1.01 MB
Binary file not shown.

Week3/Planar+data+classification+with+one+hidden+layer+v3.ipynb

Lines changed: 1579 additions & 0 deletions
Large diffs are not rendered by default.

Week3/classification_kiank.png

136 KB

Week3/grad_summary.png

666 KB

Week3/planar_utils.py

Lines changed: 66 additions & 0 deletions
@@ -0,0 +1,66 @@
+import matplotlib.pyplot as plt
+import numpy as np
+import sklearn
+import sklearn.datasets
+import sklearn.linear_model
+
+def plot_decision_boundary(model, X, y):
+    # Set min and max values and give it some padding
+    x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
+    y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
+    h = 0.01
+    # Generate a grid of points with distance h between them
+    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
+    # Predict the function value for the whole grid
+    Z = model(np.c_[xx.ravel(), yy.ravel()])
+    Z = Z.reshape(xx.shape)
+    # Plot the contour and training examples
+    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
+    plt.ylabel('x2')
+    plt.xlabel('x1')
+    plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)
+
+
+def sigmoid(x):
+    """
+    Compute the sigmoid of x
+
+    Arguments:
+    x -- A scalar or numpy array of any size.
+
+    Return:
+    s -- sigmoid(x)
+    """
+    s = 1/(1+np.exp(-x))
+    return s
+
+def load_planar_dataset():
+    np.random.seed(1)
+    m = 400 # number of examples
+    N = int(m/2) # number of points per class
+    D = 2 # dimensionality
+    X = np.zeros((m,D)) # data matrix where each row is a single example
+    Y = np.zeros((m,1), dtype='uint8') # labels vector (0 for red, 1 for blue)
+    a = 4 # maximum radius of the flower
+
+    for j in range(2):
+        ix = range(N*j,N*(j+1))
+        t = np.linspace(j*3.12,(j+1)*3.12,N) + np.random.randn(N)*0.2 # theta
+        r = a*np.sin(4*t) + np.random.randn(N)*0.2 # radius
+        X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
+        Y[ix] = j
+
+    X = X.T
+    Y = Y.T
+
+    return X, Y
+
+def load_extra_datasets():
+    N = 200
+    noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)
+    noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)
+    blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)
+    gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2, n_classes=2, shuffle=True, random_state=None)
+    no_structure = np.random.rand(N, 2), np.random.rand(N, 2)
+
+    return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure
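
For orientation, this is how the Week 3 notebook drives these helpers: fit a classifier, then hand plot_decision_boundary a callable that maps an (m, 2) array of grid points to 0/1 predictions. A minimal sketch with an sklearn logistic-regression baseline; the wiring shown is an assumption based on the assignment, not part of this file.

import matplotlib.pyplot as plt
import sklearn.linear_model
from planar_utils import load_planar_dataset, plot_decision_boundary

# X has shape (2, 400) and Y has shape (1, 400): columns are examples.
X, Y = load_planar_dataset()

# sklearn expects (examples x features), hence the transposes.
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X.T, Y.T.ravel())

# The lambda adapts clf.predict to the (m, 2) grid that plot_decision_boundary builds.
plot_decision_boundary(lambda x: clf.predict(x), X, Y.ravel())
plt.show()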

Week3/sgd-gradient-descent.gif

40.7 KB

Week3/sgd_bad.gif

62.5 KB
