
Commit 2776035

Merge pull request lisa-lab#150 from nouiz/master2
Small fixes and use of the updated interface for Theano 0.8
2 parents 7768809 + ee5c0cb commit 2776035


5 files changed: +10 −8 lines


code/DBN.py

Lines changed: 1 addition & 1 deletion
@@ -174,7 +174,7 @@ def pretraining_functions(self, train_set_x, batch_size, k):

             # compile the theano function
             fn = theano.function(
-                inputs=[index, theano.Param(learning_rate, default=0.1)],
+                inputs=[index, theano.In(learning_rate, value=0.1)],
                 outputs=cost,
                 updates=updates,
                 givens={
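This change adopts the Theano 0.8 way of giving a non-shared input a default value: theano.In replaces the deprecated theano.Param, and the default is passed as value instead of default. A minimal sketch of that idiom on a toy function (not taken from DBN.py; the names x and lr are illustrative):

    import theano
    import theano.tensor as T

    # 'lr' gets a default of 0.1 via theano.In, the Theano 0.8
    # replacement for the deprecated theano.Param.
    x = T.scalar('x')
    lr = T.scalar('lr')
    f = theano.function(
        inputs=[x, theano.In(lr, value=0.1)],
        outputs=x * lr
    )
    print(f(2.0))       # default learning rate -> 0.2
    print(f(2.0, 0.5))  # explicit learning rate -> 1.0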

code/convolutional_mlp.py

Lines changed: 1 addition & 1 deletion
@@ -94,7 +94,7 @@ def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
             input=input,
             filters=self.W,
             filter_shape=filter_shape,
-            image_shape=image_shape
+            input_shape=image_shape
         )

         # downsample each feature map individually, using maxpooling
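For context: in the conv2d interface that Theano 0.8 promotes, the optional shape hint is named input_shape rather than image_shape, while its meaning (batch size, channels, rows, columns) is unchanged. A minimal sketch, not from the repository, assuming single-channel 28x28 inputs and two 3x3 filters:

    import numpy
    import theano
    import theano.tensor as T
    from theano.tensor.nnet import conv2d

    rng = numpy.random.RandomState(1234)
    x = T.tensor4('x')  # (batch, channels, rows, cols)
    W = theano.shared(
        rng.randn(2, 1, 3, 3).astype(theano.config.floatX), name='W'
    )
    out = conv2d(
        input=x,
        filters=W,
        filter_shape=(2, 1, 3, 3),
        input_shape=(None, 1, 28, 28)  # formerly image_shape; None = unknown batch size
    )
    f = theano.function([x], out)
    data = rng.randn(5, 1, 28, 28).astype(theano.config.floatX)
    print(f(data).shape)  # (5, 2, 26, 26) with the default 'valid' convolution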

code/rbm.py

Lines changed: 4 additions & 2 deletions
@@ -257,7 +257,8 @@ def get_cost_updates(self, lr=0.1, persistent=None, k=1):
             # chain_start is the initial state corresponding to the
             # 6th output
             outputs_info=[None, None, None, None, None, chain_start],
-            n_steps=k
+            n_steps=k,
+            name="gibbs_hvh"
         )
         # start-snippet-3
         # determine gradients on RBM parameters
@@ -496,7 +497,8 @@ def test_rbm(learning_rate=0.1, training_epochs=15,
     ) = theano.scan(
         rbm.gibbs_vhv,
         outputs_info=[None, None, None, None, None, persistent_vis_chain],
-        n_steps=plot_every
+        n_steps=plot_every,
+        name="gibbs_vhv"
     )

     # add to updates the shared variable that takes care of our persistent
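The only functional addition here is the name argument to theano.scan, which labels each Gibbs-sampling loop so it is easier to identify in profiler output and error messages. A minimal sketch of the same pattern on a toy recurrence (not from rbm.py; the loop and its name are illustrative):

    import numpy
    import theano
    import theano.tensor as T

    x = T.vector('x')
    # Toy loop that doubles its state 4 times; 'name' tags the scan node.
    results, updates = theano.scan(
        fn=lambda prev: prev * 2,
        outputs_info=x,
        n_steps=4,
        name="double_loop"
    )
    f = theano.function([x], results[-1], updates=updates)
    x0 = numpy.asarray([1.0, 2.0], dtype=theano.config.floatX)
    print(f(x0))  # [ 16.  32.]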

data/download.sh

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@ WGET=$?
 which curl >/dev/null 2>&1
 CURL=$?
 if [ "$WGET" -eq 0 ]; then
-    DL_CMD="wget -c"
+    DL_CMD="wget --no-verbose -c"
 elif [ "$CURL" -eq 0 ]; then
     DL_CMD="curl -C - -O"
 else
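The added --no-verbose flag trims wget's output to one status line per file instead of a full progress log, while -c still resumes interrupted downloads; the curl branch is unchanged.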

doc/lenet.txt

Lines changed: 3 additions & 3 deletions
@@ -196,7 +196,7 @@ one of Figure 1. The input consists of 3 features maps (an RGB color image) of s

     import theano
     from theano import tensor as T
-    from theano.tensor.nnet import conv
+    from theano.tensor.nnet import conv2d

     import numpy

@@ -226,7 +226,7 @@ one of Figure 1. The input consists of 3 features maps (an RGB color image) of s
             dtype=input.dtype), name ='b')

     # build symbolic expression that computes the convolution of input with filters in w
-    conv_out = conv.conv2d(input, W)
+    conv_out = conv2d(input, W)

     # build symbolic expression to add bias and apply activation function, i.e. produce neural net layer output
     # A few words on ``dimshuffle`` :
@@ -404,7 +404,7 @@ to be compatible with our previous MLP implementation.
 Note that the term "convolution" could corresponds to different mathematical operations:

 1. `theano.tensor.nnet.conv2d
-   <http://deeplearning.net/software/theano/library/tensor/nnet/conv.html#theano.tensor.nnet.conv.conv2d>`_,
+   <http://deeplearning.net/software/theano/library/tensor/nnet/conv.html#theano.tensor.nnet.conv2d>`_,
    which is the most common one in almost all of the recent published
    convolutional models.
    In this operation, each output feature map is connected to each
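The documentation changes mirror the code: conv2d is now imported directly from theano.tensor.nnet and called without the conv. module prefix. A minimal sketch of the updated usage, not copied verbatim from the tutorial (the filter shape and random seed are illustrative):

    import numpy
    import theano
    import theano.tensor as T
    from theano.tensor.nnet import conv2d  # was: from theano.tensor.nnet import conv

    rng = numpy.random.RandomState(23455)
    input = T.tensor4(name='input')  # symbolic batch of images
    w_shp = (2, 3, 9, 9)             # 2 filters over 3-channel images, 9x9 kernels
    W = theano.shared(
        numpy.asarray(rng.uniform(low=-0.5, high=0.5, size=w_shp),
                      dtype=input.dtype),
        name='W'
    )
    conv_out = conv2d(input, W)      # was: conv.conv2d(input, W)
    f = theano.function([input], conv_out)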
