Skip to content

Commit eb315c6

Browse files
committed
Make the number of hidden units for the MLP parametrisable.
1 parent 7ef0549 commit eb315c6

File tree

1 file changed

+3
-2
lines changed

1 file changed

+3
-2
lines changed

code/mlp.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -174,7 +174,7 @@ def __init__(self, rng, input, n_in, n_hidden, n_out):
174174

175175

176176
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
177-
dataset='../data/mnist.pkl.gz', batch_size=20):
177+
dataset='../data/mnist.pkl.gz', batch_size=20, n_hidden=500):
178178
"""
179179
Demonstrate stochastic gradient descent optimization for a multilayer
180180
perceptron
@@ -227,7 +227,8 @@ def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
227227
rng = numpy.random.RandomState(1234)
228228

229229
# construct the MLP class
230-
classifier = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=500, n_out=10)
230+
classifier = MLP(rng=rng, input=x, n_in=28 * 28,
231+
n_hidden=n_hidden, n_out=10)
231232

232233
# the cost we minimize during training is the negative log likelihood of
233234
# the model plus the regularization terms (L1 and L2); cost is expressed

0 commit comments

Comments
 (0)