
Commit 1bb3230

Merge pull request lisa-lab#132 from gyom/master

made tutorials compatible with python 3

2 parents: 50b5010 + 8ca9239 · commit 1bb3230

20 files changed: +259 −212 lines

.travis.yml

Lines changed: 2 additions & 2 deletions

@@ -4,8 +4,8 @@ sudo: false

 language: python
 #python:
-# - "2.7"
-# - "3.2"
+# - "2.6"
+# - "3.3"
 # command to install dependencies
 before_install:
 - wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh

code/DBN.py

Lines changed: 7 additions & 7 deletions

@@ -75,7 +75,7 @@ def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
         # training the DBN by doing stochastic gradient descent on the
         # MLP.

-        for i in xrange(self.n_layers):
+        for i in range(self.n_layers):
             # construct the sigmoidal layer

             # the size of the input is either the number of hidden
@@ -267,11 +267,11 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):

         # Create a function that scans the entire validation set
         def valid_score():
-            return [valid_score_i(i) for i in xrange(n_valid_batches)]
+            return [valid_score_i(i) for i in range(n_valid_batches)]

         # Create a function that scans the entire test set
         def test_score():
-            return [test_score_i(i) for i in xrange(n_test_batches)]
+            return [test_score_i(i) for i in range(n_test_batches)]

         return train_fn, valid_score, test_score

@@ -329,12 +329,12 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
     print '... pre-training the model'
     start_time = timeit.default_timer()
     ## Pre-train layer-wise
-    for i in xrange(dbn.n_layers):
+    for i in range(dbn.n_layers):
         # go through pretraining epochs
-        for epoch in xrange(pretraining_epochs):
+        for epoch in range(pretraining_epochs):
             # go through the training set
             c = []
-            for batch_index in xrange(n_train_batches):
+            for batch_index in range(n_train_batches):
                 c.append(pretraining_fns[i](index=batch_index,
                                             lr=pretrain_lr))
             print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
@@ -379,7 +379,7 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,

     while (epoch < training_epochs) and (not done_looping):
         epoch = epoch + 1
-        for minibatch_index in xrange(n_train_batches):
+        for minibatch_index in range(n_train_batches):

             minibatch_avg_cost = train_fn(minibatch_index)
             iter = (epoch - 1) * n_train_batches + minibatch_index
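
Note (illustration, not part of the commit): every change in code/DBN.py swaps xrange for range. A minimal standalone sketch of why range is the portable spelling; the variable names below are made up, not taken from DBN.py:

# Minimal sketch; hypothetical batch count, not from the tutorial.
n_train_batches = 500

# Python 3 removed xrange; range() exists on both 2 and 3.
# On Python 2, range() builds a list, which is acceptable for counts this small.
for minibatch_index in range(n_train_batches):
    pass

# If Python 2 laziness really mattered, a compatibility import such as
#     from six.moves import range
# could be used instead (the six library is not used by this commit).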

code/SdA.py

Lines changed: 26 additions & 24 deletions

@@ -29,6 +29,9 @@
    Systems 19, 2007

 """
+
+from __future__ import print_function
+
 import os
 import sys
 import timeit
@@ -116,7 +119,7 @@ def __init__(
         # stochastich gradient descent on the MLP

         # start-snippet-2
-        for i in xrange(self.n_layers):
+        for i in range(self.n_layers):
             # construct the sigmoidal layer

             # the size of the input is either the number of hidden units of
@@ -254,9 +257,9 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):

         # compute number of minibatches for training, validation and testing
         n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
-        n_valid_batches /= batch_size
+        n_valid_batches //= batch_size
         n_test_batches = test_set_x.get_value(borrow=True).shape[0]
-        n_test_batches /= batch_size
+        n_test_batches //= batch_size

         index = T.lscalar('index')  # index to a [mini]batch

@@ -314,11 +317,11 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):

         # Create a function that scans the entire validation set
         def valid_score():
-            return [valid_score_i(i) for i in xrange(n_valid_batches)]
+            return [valid_score_i(i) for i in range(n_valid_batches)]

         # Create a function that scans the entire test set
         def test_score():
-            return [test_score_i(i) for i in xrange(n_test_batches)]
+            return [test_score_i(i) for i in range(n_test_batches)]

         return train_fn, valid_score, test_score

@@ -357,12 +360,12 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,

     # compute number of minibatches for training, validation and testing
     n_train_batches = train_set_x.get_value(borrow=True).shape[0]
-    n_train_batches /= batch_size
+    n_train_batches //= batch_size

     # numpy random generator
     # start-snippet-3
     numpy_rng = numpy.random.RandomState(89677)
-    print '... building the model'
+    print('... building the model')
     # construct the stacked denoising autoencoder class
     sda = SdA(
         numpy_rng=numpy_rng,
@@ -374,52 +377,51 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
     #########################
     # PRETRAINING THE MODEL #
     #########################
-    print '... getting the pretraining functions'
+    print('... getting the pretraining functions')
     pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,
                                                 batch_size=batch_size)

-    print '... pre-training the model'
+    print('... pre-training the model')
     start_time = timeit.default_timer()
     ## Pre-train layer-wise
     corruption_levels = [.1, .2, .3]
-    for i in xrange(sda.n_layers):
+    for i in range(sda.n_layers):
         # go through pretraining epochs
-        for epoch in xrange(pretraining_epochs):
+        for epoch in range(pretraining_epochs):
             # go through the training set
             c = []
-            for batch_index in xrange(n_train_batches):
+            for batch_index in range(n_train_batches):
                 c.append(pretraining_fns[i](index=batch_index,
                          corruption=corruption_levels[i],
                          lr=pretrain_lr))
-            print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
-            print numpy.mean(c)
+            print('Pre-training layer %i, epoch %d, cost %f' % (i, epoch, numpy.mean(c)))

     end_time = timeit.default_timer()

-    print >> sys.stderr, ('The pretraining code for file ' +
-                          os.path.split(__file__)[1] +
-                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
+    print(('The pretraining code for file ' +
+           os.path.split(__file__)[1] +
+           ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
     # end-snippet-4
     ########################
     # FINETUNING THE MODEL #
     ########################

     # get the training, validation and testing function for the model
-    print '... getting the finetuning functions'
+    print('... getting the finetuning functions')
     train_fn, validate_model, test_model = sda.build_finetune_functions(
         datasets=datasets,
         batch_size=batch_size,
         learning_rate=finetune_lr
     )

-    print '... finetunning the model'
+    print('... finetunning the model')
     # early-stopping parameters
     patience = 10 * n_train_batches  # look as this many examples regardless
     patience_increase = 2.  # wait this much longer when a new best is
                             # found
     improvement_threshold = 0.995  # a relative improvement of this much is
                                    # considered significant
-    validation_frequency = min(n_train_batches, patience / 2)
+    validation_frequency = min(n_train_batches, patience // 2)
                                   # go through this many
                                   # minibatche before checking the network
                                   # on the validation set; in this case we
@@ -434,7 +436,7 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,

     while (epoch < training_epochs) and (not done_looping):
         epoch = epoch + 1
-        for minibatch_index in xrange(n_train_batches):
+        for minibatch_index in range(n_train_batches):
             minibatch_avg_cost = train_fn(minibatch_index)
             iter = (epoch - 1) * n_train_batches + minibatch_index

@@ -480,9 +482,9 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
         )
         % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
     )
-    print >> sys.stderr, ('The training code for file ' +
-                          os.path.split(__file__)[1] +
-                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
+    print(('The training code for file ' +
+           os.path.split(__file__)[1] +
+           ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)


 if __name__ == '__main__':
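
Note (illustration, not part of the commit): besides the print() conversion, code/SdA.py replaces /= with //= when computing minibatch counts. Under Python 3, / is true division and returns a float, which then breaks range(); floor division keeps the count an integer on both versions. A small sketch with made-up numbers:

# Illustrative only; the sizes are hypothetical, not from the tutorial data.
n_examples = 50000
batch_size = 20

bad = n_examples / batch_size    # Python 3: 2500.0 (float); range(bad) raises TypeError
good = n_examples // batch_size  # Python 2 and 3: 2500 (int)

for i in range(good):
    pass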

code/cA.py

Lines changed: 11 additions & 8 deletions

@@ -28,6 +28,9 @@
    Systems 19, 2007

 """
+
+from __future__ import print_function
+
 import os
 import sys
 import timeit
@@ -205,7 +208,7 @@ def get_cost_updates(self, contraction_level, learning_rate):
                               axis=1)

         # Compute the jacobian and average over the number of samples/minibatch
-        self.L_jacob = T.sum(J ** 2) / self.n_batchsize
+        self.L_jacob = T.sum(J ** 2) // self.n_batchsize

         # note : L is now a vector, where each element is the
         #        cross-entropy cost of the reconstruction of the
@@ -246,7 +249,7 @@ def test_cA(learning_rate=0.01, training_epochs=20,
     train_set_x, train_set_y = datasets[0]

     # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size

     # allocate symbolic variables for the data
     index = T.lscalar()    # index to a [mini]batch
@@ -283,22 +286,22 @@ def test_cA(learning_rate=0.01, training_epochs=20,
     ############

     # go through training epochs
-    for epoch in xrange(training_epochs):
+    for epoch in range(training_epochs):
         # go through trainng set
         c = []
-        for batch_index in xrange(n_train_batches):
+        for batch_index in range(n_train_batches):
             c.append(train_ca(batch_index))

         c_array = numpy.vstack(c)
-        print 'Training epoch %d, reconstruction cost ' % epoch, numpy.mean(
-            c_array[0]), ' jacobian norm ', numpy.mean(numpy.sqrt(c_array[1]))
+        print('Training epoch %d, reconstruction cost ' % epoch, numpy.mean(
+            c_array[0]), ' jacobian norm ', numpy.mean(numpy.sqrt(c_array[1])))

     end_time = timeit.default_timer()

     training_time = (end_time - start_time)

-    print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] +
-                          ' ran for %.2fm' % ((training_time) / 60.))
+    print(('The code for file ' + os.path.split(__file__)[1] +
+           ' ran for %.2fm' % ((training_time) / 60.)), file=sys.stderr)
     image = Image.fromarray(tile_raster_images(
         X=ca.W.get_value(borrow=True).T,
         img_shape=(28, 28), tile_shape=(10, 10),
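
Note (illustration, not part of the commit): code/cA.py, like the other files, adds from __future__ import print_function immediately after the module docstring so that print(...) parses as a function call on Python 2 as well. A minimal sketch of the pattern; nothing below is taken from the repository:

"""Docstring first; __future__ imports must come right after it."""
from __future__ import print_function

import sys

# With the future import this is a real function call on Python 2.6+ and Python 3;
# without it, Python 2 would print a tuple here, and the file= form below would
# even be a syntax error on Python 2.
print('Training epoch %d, cost %f' % (0, 0.5))
print('warning message', file=sys.stderr)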

code/convolutional_mlp.py

Lines changed: 17 additions & 14 deletions

@@ -21,6 +21,9 @@
    http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf

 """
+
+from __future__ import print_function
+
 import os
 import sys
 import timeit
@@ -70,7 +73,7 @@ def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
         # each unit in the lower layer receives a gradient from:
         # "num output feature maps * filter height * filter width" /
         #   pooling size
-        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
+        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) //
                    numpy.prod(poolsize))
         # initialize weights with random weights
         W_bound = numpy.sqrt(6. / (fan_in + fan_out))
@@ -145,9 +148,9 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     n_train_batches = train_set_x.get_value(borrow=True).shape[0]
     n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
     n_test_batches = test_set_x.get_value(borrow=True).shape[0]
-    n_train_batches /= batch_size
-    n_valid_batches /= batch_size
-    n_test_batches /= batch_size
+    n_train_batches //= batch_size
+    n_valid_batches //= batch_size
+    n_test_batches //= batch_size

     # allocate symbolic variables for the data
     index = T.lscalar()  # index to a [mini]batch
@@ -160,7 +163,7 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     ######################
     # BUILD ACTUAL MODEL #
     ######################
-    print '... building the model'
+    print('... building the model')

     # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
     # to a 4D tensor, compatible with our LeNetConvPoolLayer
@@ -261,14 +264,14 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     ###############
     # TRAIN MODEL #
     ###############
-    print '... training'
+    print('... training')
     # early-stopping parameters
     patience = 10000  # look as this many examples regardless
     patience_increase = 2  # wait this much longer when a new best is
                            # found
     improvement_threshold = 0.995  # a relative improvement of this much is
                                    # considered significant
-    validation_frequency = min(n_train_batches, patience / 2)
+    validation_frequency = min(n_train_batches, patience // 2)
                                   # go through this many
                                   # minibatche before checking the network
                                   # on the validation set; in this case we
@@ -284,19 +287,19 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,

     while (epoch < n_epochs) and (not done_looping):
         epoch = epoch + 1
-        for minibatch_index in xrange(n_train_batches):
+        for minibatch_index in range(n_train_batches):

             iter = (epoch - 1) * n_train_batches + minibatch_index

             if iter % 100 == 0:
-                print 'training @ iter = ', iter
+                print('training @ iter = ', iter)
             cost_ij = train_model(minibatch_index)

             if (iter + 1) % validation_frequency == 0:

                 # compute zero-one loss on validation set
                 validation_losses = [validate_model(i) for i
-                                     in xrange(n_valid_batches)]
+                                     in range(n_valid_batches)]
                 this_validation_loss = numpy.mean(validation_losses)
                 print('epoch %i, minibatch %i/%i, validation error %f %%' %
                       (epoch, minibatch_index + 1, n_train_batches,
@@ -317,7 +320,7 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
                 # test it on the test set
                 test_losses = [
                     test_model(i)
-                    for i in xrange(n_test_batches)
+                    for i in range(n_test_batches)
                 ]
                 test_score = numpy.mean(test_losses)
                 print((' epoch %i, minibatch %i/%i, test error of '
@@ -334,9 +337,9 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     print('Best validation score of %f %% obtained at iteration %i, '
           'with test performance %f %%' %
           (best_validation_loss * 100., best_iter + 1, test_score * 100.))
-    print >> sys.stderr, ('The code for file ' +
-                          os.path.split(__file__)[1] +
-                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
+    print(('The code for file ' +
+           os.path.split(__file__)[1] +
+           ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)

 if __name__ == '__main__':
     evaluate_lenet5()
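
Note (illustration, not part of the commit): the timing messages that used the Python 2 "print >> sys.stderr, ..." syntax are rewritten as print(..., file=sys.stderr), which works on both versions once print_function is imported. A sketch of the equivalent pattern, with placeholder work:

from __future__ import print_function

import sys
import timeit

start_time = timeit.default_timer()
# ... the work being timed would run here ...
end_time = timeit.default_timer()

# Python 2 only:  print >> sys.stderr, 'ran for %.2fm' % ((end_time - start_time) / 60.)
# Python 2 and 3:
print('ran for %.2fm' % ((end_time - start_time) / 60.), file=sys.stderr)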
