
Commit 8064103

several changes
1 parent 6b64eb8 · commit 8064103

25 files changed: +12375 additions, -737 deletions

code/SdA.py

Lines changed: 436 additions & 436 deletions
Large diffs are not rendered by default.

code/SdA_v2.py

Lines changed: 686 additions & 0 deletions
Large diffs are not rendered by default.

code/VanillaNN.py

Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
import numpy as np

# sigmoid function
def nonlin(x,deriv=False):
    if(deriv==True):
        return x*(1-x)
    return 1/(1+np.exp(-x))

# input dataset
X = np.array([ [0,0,1],
               [0,1,1],
               [1,0,1],
               [1,1,1] ])

# output dataset
y = np.array([[0,0,1,1]]).T

# seed random numbers to make calculation
# deterministic (just a good practice)
np.random.seed(1)

# initialize weights randomly with mean 0
syn0 = 2*np.random.random((3,1)) - 1

for iter in xrange(10000):

    # forward propagation
    l0 = X
    l1 = nonlin(np.dot(l0,syn0))

    # how much did we miss?
    l1_error = y - l1

    # multiply how much we missed by the
    # slope of the sigmoid at the values in l1
    l1_delta = l1_error * nonlin(l1,True)

    # update weights
    syn0 += np.dot(l0.T,l1_delta)

print "Output After Training:"
print l1
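
As committed, this file is Python 2 (xrange, print statements) and will not run under a modern interpreter. For readers following along in Python 3, a minimal behavior-preserving port of the same single-layer network, keeping the original variable names, might look like this:

import numpy as np

def nonlin(x, deriv=False):
    # with deriv=True, x is assumed to already be a sigmoid output,
    # so x * (1 - x) is the slope of the sigmoid at that activation
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))

X = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
y = np.array([[0, 0, 1, 1]]).T

np.random.seed(1)
syn0 = 2 * np.random.random((3, 1)) - 1   # weights drawn from [-1, 1)

for _ in range(10000):
    l1 = nonlin(X @ syn0)                          # forward pass
    l1_error = y - l1                              # residual
    l1_delta = l1_error * nonlin(l1, deriv=True)   # error weighted by slope
    syn0 += X.T @ l1_delta                         # full-batch gradient step

print("Output After Training:")
print(l1)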

code/VanillaNNLayers.py

Lines changed: 62 additions & 0 deletions
@@ -0,0 +1,62 @@
import numpy as np

def nonlin(x,deriv=False):
    if(deriv==True):
        return x*(1-x)

    return 1/(1+np.exp(-x))

X = np.array([[1,0,0],
              [1,0,1],
              [1,1,0],
              [1,1,1]])

y = np.array([[0],
              [0],
              [0],
              [1]])

np.random.seed(1)

# randomly initialize our weights with mean 0
syn0 = 2*np.random.random((3,4)) - 1
b0 = np.array([[1]])
syn1 = 2*np.random.random((4,1)) - 1
b1 = np.array([[1]])

print b0
#syn0[:,3] = 1
#syn1[:,3] = -1
#syn1[:,1] = 1
#print syn1

for j in xrange(60000):

    # Feed forward through layers 0, 1, and 2
    l0 = X
    l1 = nonlin(np.dot(l0,syn0)) + b0
    l2 = nonlin(np.dot(l1,syn1)) + b1

    # how much did we miss the target value?
    l2_error = y - l2

    if (j % 10000) == 0:
        print "Error:" + str(np.mean(np.abs(l2_error)))

    # in what direction is the target value?
    # were we really sure? if so, don't change too much.
    l2_delta = l2_error*nonlin(l2,deriv=True)

    # how much did each l1 value contribute to the l2 error (according to the weights)?
    l1_error = l2_delta.dot(syn1.T)

    # in what direction is the target l1?
    # were we really sure? if so, don't change too much.
    l1_delta = l1_error * nonlin(l1,deriv=True)

    syn1 += l1.T.dot(l2_delta)
    syn0 += l0.T.dot(l1_delta)

print "Output After Training:"
print l2
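
One thing worth flagging in this file: b0 and b1 are added after the sigmoid, so l1 and l2 land in (1, 2) rather than (0, 1), which means nonlin(l, deriv=True) = l*(1-l) goes negative and no longer equals the sigmoid's slope at those activations; the biases are also never updated in the loop. Whether that is deliberate experimentation or an oversight is not clear from the commit. The conventional arrangement, sketched here as an assumption rather than as the author's intent, applies the bias before the nonlinearity and trains it alongside the weights:

# per-unit biases (shapes follow the layer widths), initialized to zero
b0 = np.zeros((1, 4))
b1 = np.zeros((1, 1))

# inside the training loop, the forward pass becomes
l1 = nonlin(np.dot(l0, syn0) + b0)   # bias enters before the squashing,
l2 = nonlin(np.dot(l1, syn1) + b1)   # so activations stay in (0, 1)

# ... l2_delta and l1_delta computed exactly as in the file ...

# bias updates: accumulate each delta over the batch dimension
b1 += l2_delta.sum(axis=0, keepdims=True)
b0 += l1_delta.sum(axis=0, keepdims=True)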

code/convolutional_mlp.py

Lines changed: 1 addition & 0 deletions
@@ -149,6 +149,7 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     n_valid_batches /= batch_size
     n_test_batches /= batch_size
 
+    print test_set_x.get_value()[0].shape
     # allocate symbolic variables for the data
     index = T.lscalar()  # index to a [mini]batch
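
The single added line is a debug print: test_set_x is a Theano shared variable holding the test matrix, so test_set_x.get_value() returns the underlying NumPy array, and [0].shape is the shape of the first flattened example, presumably printed as a quick sanity check on the loaded data. For the MNIST data this tutorial script loads, that shape would be (784,).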
