
Commit 4c0858d

got rid of all the xrange

1 parent 90b925b commit 4c0858d
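
Background (a note on why the swap is safe, not part of the commit message): in Python 2, xrange() is a lazy sequence while range() materializes a full list; in Python 3, xrange() is gone and the built-in range() is itself lazy. Replacing xrange with range is therefore behavior-preserving on Python 3, and on Python 2 it costs at most a transient list for these modest loop bounds. A minimal illustrative sketch:

    # Sketch (illustrative, not from the diff): on Python 3, range() is a
    # lazy sequence, just like Python 2's xrange(), so loops are unchanged.
    total = 0
    for i in range(10):  # no list is materialized on Python 3
        total += i
    print(total)  # prints 45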

File tree: 11 files changed, +29 -37 lines changed

code/DBN.py
Lines changed: 7 additions & 7 deletions

@@ -75,7 +75,7 @@ def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
         # training the DBN by doing stochastic gradient descent on the
         # MLP.

-        for i in xrange(self.n_layers):
+        for i in range(self.n_layers):
             # construct the sigmoidal layer

             # the size of the input is either the number of hidden
@@ -267,11 +267,11 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):

         # Create a function that scans the entire validation set
         def valid_score():
-            return [valid_score_i(i) for i in xrange(n_valid_batches)]
+            return [valid_score_i(i) for i in range(n_valid_batches)]

         # Create a function that scans the entire test set
         def test_score():
-            return [test_score_i(i) for i in xrange(n_test_batches)]
+            return [test_score_i(i) for i in range(n_test_batches)]

         return train_fn, valid_score, test_score

@@ -329,12 +329,12 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
     print '... pre-training the model'
     start_time = timeit.default_timer()
     ## Pre-train layer-wise
-    for i in xrange(dbn.n_layers):
+    for i in range(dbn.n_layers):
         # go through pretraining epochs
-        for epoch in xrange(pretraining_epochs):
+        for epoch in range(pretraining_epochs):
             # go through the training set
             c = []
-            for batch_index in xrange(n_train_batches):
+            for batch_index in range(n_train_batches):
                 c.append(pretraining_fns[i](index=batch_index,
                                             lr=pretrain_lr))
             print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
@@ -379,7 +379,7 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,

     while (epoch < training_epochs) and (not done_looping):
         epoch = epoch + 1
-        for minibatch_index in xrange(n_train_batches):
+        for minibatch_index in range(n_train_batches):

             minibatch_avg_cost = train_fn(minibatch_index)
             iter = (epoch - 1) * n_train_batches + minibatch_index

code/cA.py
Lines changed: 2 additions & 3 deletions

@@ -30,7 +30,6 @@
 """

 from __future__ import print_function
-from six.moves import xrange

 import os
 import sys
@@ -287,10 +286,10 @@ def test_cA(learning_rate=0.01, training_epochs=20,
     ############

     # go through training epochs
-    for epoch in xrange(training_epochs):
+    for epoch in range(training_epochs):
         # go through trainng set
         c = []
-        for batch_index in xrange(n_train_batches):
+        for batch_index in range(n_train_batches):
             c.append(train_ca(batch_index))

         c_array = numpy.vstack(c)
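
A note on the design choice (not part of the diff): six.moves also exposes a range shim that resolves to xrange on Python 2 and to the built-in range on Python 3, so an alternative to dropping the import, as this file and several below do, would have been to rebind it. A hypothetical sketch of that alternative, assuming six is installed:

    # Hypothetical alternative to this commit: keep laziness on both
    # interpreters by importing the range shim instead of xrange.
    from __future__ import print_function
    from six.moves import range  # xrange on Py2, built-in range on Py3

    for epoch in range(3):
        print('epoch', epoch)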

code/hmc/test_hmc.py
Lines changed: 2 additions & 3 deletions

@@ -1,6 +1,5 @@

 from __future__ import print_function
-from six.moves import xrange

 import numpy
 import theano
@@ -39,10 +38,10 @@ def gaussian_energy(x):
         initial_stepsize=1e-3, stepsize_max=0.5)

     # Start with a burn-in process
-    garbage = [sampler.draw() for r in xrange(burnin)]  # burn-in Draw
+    garbage = [sampler.draw() for r in range(burnin)]  # burn-in Draw
     # `n_samples`: result is a 3D tensor of dim [n_samples, batchsize,
     # dim]
-    _samples = numpy.asarray([sampler.draw() for r in xrange(n_samples)])
+    _samples = numpy.asarray([sampler.draw() for r in range(n_samples)])
     # Flatten to [n_samples * batchsize, dim]
     samples = _samples.T.reshape(dim, -1).T

code/logistic_cg.py
Lines changed: 4 additions & 4 deletions

@@ -239,15 +239,15 @@ def cg_optimization_mnist(n_epochs=50, mnist_pkl_gz='mnist.pkl.gz'):
     def train_fn(theta_value):
         classifier.theta.set_value(theta_value, borrow=True)
         train_losses = [batch_cost(i * batch_size)
-                        for i in xrange(n_train_batches)]
+                        for i in range(n_train_batches)]
         return numpy.mean(train_losses)

     # creates a function that computes the average gradient of cost with
     # respect to theta
     def train_fn_grad(theta_value):
         classifier.theta.set_value(theta_value, borrow=True)
         grad = batch_grad(0)
-        for i in xrange(1, n_train_batches):
+        for i in range(1, n_train_batches):
             grad += batch_grad(i * batch_size)
         return grad / n_train_batches

@@ -258,7 +258,7 @@ def callback(theta_value):
         classifier.theta.set_value(theta_value, borrow=True)
         #compute the validation loss
         validation_losses = [validate_model(i * batch_size)
-                             for i in xrange(n_valid_batches)]
+                             for i in range(n_valid_batches)]
         this_validation_loss = numpy.mean(validation_losses)
         print('validation error %f %%' % (this_validation_loss * 100.,))

@@ -268,7 +268,7 @@ def callback(theta_value):
             # testing dataset
             validation_scores[0] = this_validation_loss
             test_losses = [test_model(i * batch_size)
-                           for i in xrange(n_test_batches)]
+                           for i in range(n_test_batches)]
             validation_scores[1] = numpy.mean(test_losses)

     ###############

code/lstm.py
Lines changed: 1 addition & 2 deletions

@@ -3,7 +3,6 @@
 '''

 from __future__ import print_function
-from six.moves import xrange
 import six.moves.cPickle as pickle

 from collections import OrderedDict
@@ -549,7 +548,7 @@ def train_lstm(
     estop = False  # early stop
     start_time = time.time()
     try:
-        for eidx in xrange(max_epochs):
+        for eidx in range(max_epochs):
             n_samples = 0

             # Get new shuffled index for the training set.

code/rbm.py
Lines changed: 3 additions & 4 deletions

@@ -6,7 +6,6 @@
 """

 from __future__ import print_function
-from six.moves import xrange

 import timeit

@@ -435,11 +434,11 @@ def test_rbm(learning_rate=0.1, training_epochs=15,
     start_time = timeit.default_timer()

     # go through training epochs
-    for epoch in xrange(training_epochs):
+    for epoch in range(training_epochs):

         # go through the training set
         mean_cost = []
-        for batch_index in xrange(n_train_batches):
+        for batch_index in range(n_train_batches):
             mean_cost += [train_rbm(batch_index)]

         print('Training epoch %d, cost is ' % epoch, numpy.mean(mean_cost))
@@ -522,7 +521,7 @@ def test_rbm(learning_rate=0.1, training_epochs=15,
         (29 * n_samples + 1, 29 * n_chains - 1),
         dtype='uint8'
     )
-    for idx in xrange(n_samples):
+    for idx in range(n_samples):
         # generate `plot_every` intermediate samples that we discard,
         # because successive samples in the chain are too correlated
         vis_mf, vis_sample = sample_fn()

code/rnnrbm.py
Lines changed: 2 additions & 3 deletions

@@ -4,7 +4,6 @@
 # More information at http://deeplearning.net/tutorial/rnnrbm.html

 from __future__ import print_function
-from six.moves import xrange

 import glob
 import os
@@ -249,12 +248,12 @@ def train(self, files, batch_size=100, num_epochs=200):
                    for f in files]

         try:
-            for epoch in xrange(num_epochs):
+            for epoch in range(num_epochs):
                 numpy.random.shuffle(dataset)
                 costs = []

                 for s, sequence in enumerate(dataset):
-                    for i in xrange(0, len(sequence), batch_size):
+                    for i in range(0, len(sequence), batch_size):
                         cost = self.train_function(sequence[i:i + batch_size])
                         costs.append(cost)

code/rnnslu.py
Lines changed: 1 addition & 2 deletions

@@ -1,6 +1,5 @@

 from __future__ import print_function
-from six.moves import xrange
 import six.moves.cPickle as pickle

 from collections import OrderedDict
@@ -322,7 +321,7 @@ def main(param=None):
     # train with early stopping on validation set
     best_f1 = -numpy.inf
     param['clr'] = param['lr']
-    for e in xrange(param['nepochs']):
+    for e in range(param['nepochs']):

         # shuffle
         shuffle([train_lex, train_ne, train_y], param['seed'])

code/utils.py
Lines changed: 3 additions & 5 deletions

@@ -6,8 +6,6 @@
 image from a set of samples or weights.
 """

-
-from six.moves import xrange
 import numpy


@@ -86,7 +84,7 @@ def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
         else:
             channel_defaults = [0., 0., 0., 1.]

-        for i in xrange(4):
+        for i in range(4):
             if X[i] is None:
                 # if channel is None, fill it with zeros of the correct
                 # dtype
@@ -116,8 +114,8 @@ def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
         dt = 'uint8'
         out_array = numpy.zeros(out_shape, dtype=dt)

-        for tile_row in xrange(tile_shape[0]):
-            for tile_col in xrange(tile_shape[1]):
+        for tile_row in range(tile_shape[0]):
+            for tile_col in range(tile_shape[1]):
                 if tile_row * tile_shape[1] + tile_col < X.shape[0]:
                     this_x = X[tile_row * tile_shape[1] + tile_col]
                     if scale_rows_to_unit_interval:

doc/gettingstarted.txt
Lines changed: 1 addition & 1 deletion

@@ -578,7 +578,7 @@ of a strategy based on a geometrically increasing amount of patience.
     while (epoch < n_epochs) and (not done_looping):
         # Report "1" for first epoch, "n_epochs" for last epoch
         epoch = epoch + 1
-        for minibatch_index in xrange(n_train_batches):
+        for minibatch_index in range(n_train_batches):

             d_loss_wrt_params = ...  # compute gradient
             params -= learning_rate * d_loss_wrt_params  # gradient descent

doc/utilities.txt
Lines changed: 3 additions & 3 deletions

@@ -112,7 +112,7 @@ Tiling minibatches together is done for us by the
         else:
             channel_defaults = [0., 0., 0., 1.]

-        for i in xrange(4):
+        for i in range(4):
             if X[i] is None:
                 # if channel is None, fill it with zeros of the correct
                 # dtype
@@ -134,8 +134,8 @@ Tiling minibatches together is done for us by the
         out_array = numpy.zeros(out_shape, dtype='uint8' if output_pixel_vals else X.dtype)


-        for tile_row in xrange(tile_shape[0]):
-            for tile_col in xrange(tile_shape[1]):
+        for tile_row in range(tile_shape[0]):
+            for tile_col in range(tile_shape[1]):
                 if tile_row * tile_shape[1] + tile_col < X.shape[0]:
                     if scale_rows_to_unit_interval:
                         # if we should scale values to be between 0 and 1
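
As a follow-up, a quick scan can confirm that no xrange uses survive anywhere in the tree. A hypothetical check (not part of the commit; the code/ and doc/ paths are assumed from the file list above):

    # Hypothetical post-commit check: report any source or doc file that
    # still mentions xrange anywhere under the repository's code/ and doc/.
    import pathlib

    for root in ('code', 'doc'):
        for path in sorted(pathlib.Path(root).rglob('*')):
            if path.is_file() and path.suffix in ('.py', '.txt'):
                if 'xrange' in path.read_text(errors='ignore'):
                    print('xrange still present in', path)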
