
Commit b7481db

Merge pull request lisa-lab#63 from nouiz/master
Some update to the LSTM code.
2 parents b28a152 + 5482b18 commit b7481db

File tree

4 files changed: +417, -285 lines

code/imdb.py

Lines changed: 64 additions & 9 deletions
@@ -1,16 +1,22 @@
 import cPickle
 import gzip
 import os
-import sys
-import time

 import numpy

 import theano
-import theano.tensor as T


 def prepare_data(seqs, labels, maxlen=None):
+    """Create the matrices from the datasets.
+
+    This pads each sequence to the same length: the length of the
+    longest sequence or maxlen.
+
+    If maxlen is set, we will cut all sequences to this maximum
+    length.
+
+    """
     # x: a list of sentences
     lengths = [len(s) for s in seqs]
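For reference, the padding behaviour the new docstring describes can be pictured with a toy batch. This is an independent sketch, not part of the diff; the time-major layout (time steps as rows, one column per sample) is an assumption taken from the surrounding LSTM tutorial code.

import numpy

# Two toy sequences of word indices, padded to the longest length (4).
seqs = [[5, 9, 3], [7, 2, 8, 4]]
lengths = [len(s) for s in seqs]
x = numpy.zeros((max(lengths), len(seqs))).astype('int64')
x_mask = numpy.zeros((max(lengths), len(seqs))).astype('float32')
for idx, s in enumerate(seqs):
    x[:lengths[idx], idx] = s
    x_mask[:lengths[idx], idx] = 1.
print x       # rows are time steps, columns are samples, 0 = padding
print x_mask  # 1. where a real word is present, 0. on the padding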

@@ -42,24 +48,73 @@ def prepare_data(seqs, labels, maxlen=None):
     return x, x_mask, labels


-def load_data(path="imdb.pkl", n_words=100000, valid_portion=0.1):
+def get_dataset_file(dataset, default_dataset, origin):
+    '''Look for it as if it was a full path; if not, try a local file,
+    and if that fails, look in the data directory.
+
+    Download the dataset if it is not present.
+
+    '''
+    data_dir, data_file = os.path.split(dataset)
+    if data_dir == "" and not os.path.isfile(dataset):
+        # Check if dataset is in the data directory.
+        new_path = os.path.join(
+            os.path.split(__file__)[0],
+            "..",
+            "data",
+            dataset
+        )
+        if os.path.isfile(new_path) or data_file == default_dataset:
+            dataset = new_path
+
+    if (not os.path.isfile(dataset)) and data_file == default_dataset:
+        import urllib
+        print 'Downloading data from %s' % origin
+        urllib.urlretrieve(origin, dataset)
+    return dataset
+
+
+def load_data(path="imdb.pkl", n_words=100000, valid_portion=0.1, maxlen=None):
     ''' Loads the dataset

-    :type dataset: string
-    :param dataset: the path to the dataset (here IMDB)
+    :type path: String
+    :param path: The path to the dataset (here IMDB)
+    :type n_words: int
+    :param n_words: The number of words to keep in the vocabulary.
+        All extra words are set to unknown (1).
+    :type valid_portion: float
+    :param valid_portion: The proportion of the full train set used for
+        the validation set.
+    :type maxlen: None or positive int
+    :param maxlen: the max sequence length we use in the train/valid set.
     '''

     #############
     # LOAD DATA #
     #############

-    print '... loading data'
-
     # Load the dataset
-    f = open(path, 'rb')
+    path = get_dataset_file(
+        path, "imdb.pkl",
+        "http://www.iro.umontreal.ca/~lisa/deep/data/imdb.pkl")
+
+    if path.endswith(".gz"):
+        f = gzip.open(path, 'rb')
+    else:
+        f = open(path, 'rb')
+
     train_set = cPickle.load(f)
     test_set = cPickle.load(f)
     f.close()
+    if maxlen:
+        new_train_set_x = []
+        new_train_set_y = []
+        for x, y in zip(train_set[0], train_set[1]):
+            if len(x) < maxlen:
+                new_train_set_x.append(x)
+                new_train_set_y.append(y)
+        train_set = (new_train_set_x, new_train_set_y)
+        del new_train_set_x, new_train_set_y

     # split training set into validation set
     train_set_x, train_set_y = train_set
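A minimal usage sketch of the updated loader (again not part of the commit). It assumes, as in the tutorial's lstm.py, that load_data returns (train, valid, test) pairs of (sequences, labels); the parameter values are illustrative only.

import imdb

# maxlen drops training reviews of 100 tokens or more before the
# train/valid split; n_words caps the vocabulary (rarer words become 1).
train, valid, test = imdb.load_data(path="imdb.pkl", n_words=10000,
                                    valid_portion=0.05, maxlen=100)
train_x, train_y = train

# prepare_data pads one minibatch and returns the matching mask.
x, x_mask, y = imdb.prepare_data(train_x[:16], train_y[:16])
print x.shape, x_mask.shape, len(y)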

code/imdb_preprocess.py

Lines changed: 123 additions & 0 deletions
@@ -0,0 +1,123 @@
+"""
+This script is what created the pickled dataset.
+
+1) You need to download the tokenizer script below and put it in the same directory as this file.
+https://github.com/moses-smt/mosesdecoder/raw/master/scripts/tokenizer/tokenizer.perl . Give it execution permission.
+
+2) Get the dataset from http://ai.stanford.edu/~amaas/data/sentiment/ and extract it in the current directory.
+
+3) Then run this script.
+"""
+
+dataset_path='/Tmp/bastienf/aclImdb/'
+
+import numpy
+import cPickle as pkl
+
+from collections import OrderedDict
+
+import glob
+import os
+
+from subprocess import Popen, PIPE
+
+# tokenizer.perl is from Moses: https://github.com/moses-smt/mosesdecoder/tree/master/scripts/tokenizer
+tokenizer_cmd = ['./tokenizer.perl', '-l', 'en', '-q', '-']
+
+
+def tokenize(sentences):
+
+    print 'Tokenizing..',
+    text = "\n".join(sentences)
+    tokenizer = Popen(tokenizer_cmd, stdin=PIPE, stdout=PIPE)
+    tok_text, _ = tokenizer.communicate(text)
+    toks = tok_text.split('\n')[:-1]
+    print 'Done'
+
+    return toks
+
+
+def build_dict(path):
+    sentences = []
+    currdir = os.getcwd()
+    os.chdir('%s/pos/' % path)
+    for ff in glob.glob("*.txt"):
+        with open(ff, 'r') as f:
+            sentences.append(f.readline().strip())
+    os.chdir('%s/neg/' % path)
+    for ff in glob.glob("*.txt"):
+        with open(ff, 'r') as f:
+            sentences.append(f.readline().strip())
+    os.chdir(currdir)
+
+    sentences = tokenize(sentences)
+
+    print 'Building dictionary..',
+    wordcount = dict()
+    for ss in sentences:
+        words = ss.strip().lower().split()
+        for w in words:
+            if w not in wordcount:
+                wordcount[w] = 1
+            else:
+                wordcount[w] += 1
+
+    counts = wordcount.values()
+    keys = wordcount.keys()
+
+    sorted_idx = numpy.argsort(counts)[::-1]
+
+    worddict = dict()
+
+    for idx, ss in enumerate(sorted_idx):
+        worddict[keys[ss]] = idx+2  # leave 0 and 1 (UNK)
+
+    print numpy.sum(counts), ' total words ', len(keys), ' unique words'
+
+    return worddict
+
+
+def grab_data(path, dictionary):
+    sentences = []
+    currdir = os.getcwd()
+    os.chdir(path)
+    for ff in glob.glob("*.txt"):
+        with open(ff, 'r') as f:
+            sentences.append(f.readline().strip())
+    os.chdir(currdir)
+    sentences = tokenize(sentences)
+
+    seqs = [None] * len(sentences)
+    for idx, ss in enumerate(sentences):
+        words = ss.strip().lower().split()
+        seqs[idx] = [dictionary[w] if w in dictionary else 1 for w in words]
+
+    return seqs
+
+
+def main():
+    # Get the dataset from http://ai.stanford.edu/~amaas/data/sentiment/
+    path = dataset_path
+    dictionary = build_dict(os.path.join(path, 'train'))
+
+    train_x_pos = grab_data(path+'train/pos', dictionary)
+    train_x_neg = grab_data(path+'train/neg', dictionary)
+    train_x = train_x_pos + train_x_neg
+    train_y = [1] * len(train_x_pos) + [0] * len(train_x_neg)
+
+    test_x_pos = grab_data(path+'test/pos', dictionary)
+    test_x_neg = grab_data(path+'test/neg', dictionary)
+    test_x = test_x_pos + test_x_neg
+    test_y = [1] * len(test_x_pos) + [0] * len(test_x_neg)
+
+    f = open('imdb.pkl', 'wb')
+    pkl.dump((train_x, train_y), f, -1)
+    pkl.dump((test_x, test_y), f, -1)
+    f.close()
+
+    f = open('imdb.dict.pkl', 'wb')
+    pkl.dump(dictionary, f, -1)
+    f.close()
+
+if __name__ == '__main__':
+    main()
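As a quick sanity check (also not part of the commit), the pickles written by main() can be reloaded directly. The snippet relies only on the dump order and the reserved indices 0 and 1 (1 = unknown word) shown above, and assumes the script has already been run in the current directory.

import cPickle as pkl

# imdb.pkl holds the train and test splits dumped one after the other.
f = open('imdb.pkl', 'rb')
train_x, train_y = pkl.load(f)
test_x, test_y = pkl.load(f)
f.close()

# imdb.dict.pkl maps word -> index; 0 and 1 are reserved (1 = unknown),
# so the most frequent real word gets index 2.
worddict = pkl.load(open('imdb.dict.pkl', 'rb'))
inv = dict((v, k) for k, v in worddict.iteritems())
print len(train_x), 'training reviews,', len(test_x), 'test reviews'
print [inv.get(i, '<unk>') for i in train_x[0][:10]]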

0 commit comments
