Commit 8c87ab7

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into data_convert
2 parents: ca6fc14 + 6b52ec5

File tree

3 files changed: +170 −0 lines changed


python/paddle/v2/dataset/common.py

Lines changed: 7 additions & 0 deletions
@@ -32,3 +32,10 @@ def download(url, module_name, md5sum):
         shutil.copyfileobj(r.raw, f)

     return filename
+
+
+def dict_add(a_dict, ele):
+    if ele in a_dict:
+        a_dict[ele] += 1
+    else:
+        a_dict[ele] = 1
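For reference, dict_add is a small frequency-counter helper used by the new imdb module; a minimal usage sketch (the sample tokens are illustrative, not taken from the dataset):

import paddle.v2.dataset.common as common

word_freq = {}
for word in ['the', 'movie', 'the']:  # illustrative tokens
    common.dict_add(word_freq, word)
# word_freq is now {'the': 2, 'movie': 1}

The same effect could be had with word_freq[word] = word_freq.get(word, 0) + 1 or collections.defaultdict(int); a named helper just keeps the counting idiom in one place for the dataset modules.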

python/paddle/v2/dataset/imdb.py

Lines changed: 120 additions & 0 deletions
@@ -0,0 +1,120 @@
#!/usr/bin/env python
# -*- coding:utf-8 -*-

# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
IMDB dataset: http://ai.stanford.edu/%7Eamaas/data/sentiment/aclImdb_v1.tar.gz
"""
import paddle.v2.dataset.common
import tarfile
import Queue
import re
import string
import threading

__all__ = ['build_dict', 'train', 'test']

URL = 'http://ai.stanford.edu/%7Eamaas/data/sentiment/aclImdb_v1.tar.gz'
MD5 = '7c2ac02c03563afcf9b574c7e56c153a'


# Read files that match pattern. Tokenize and yield each file.
def tokenize(pattern):
    with tarfile.open(paddle.v2.dataset.common.download(URL, 'imdb',
                                                        MD5)) as tarf:
        # Note that we should use tarfile.next(), which does
        # sequential access of member files, rather than
        # tarfile.extractfile, which does random access and might
        # thrash hard disks.
        tf = tarf.next()
        while tf is not None:
            if bool(pattern.match(tf.name)):
                # Newline and punctuation removal, plus ad-hoc tokenization.
                yield tarf.extractfile(tf).read().rstrip("\n\r").translate(
                    None, string.punctuation).lower().split()
            tf = tarf.next()


def build_dict(pattern, cutoff):
    # Count word frequencies over all matching documents.
    word_freq = {}
    for doc in tokenize(pattern):
        for word in doc:
            paddle.v2.dataset.common.dict_add(word_freq, word)

    # Not sure if we should prune less-frequent words here.
    word_freq = filter(lambda x: x[1] > cutoff, word_freq.items())

    # Sort by descending frequency, then alphabetically, and assign ids.
    dictionary = sorted(word_freq, key=lambda x: (-x[1], x[0]))
    words, _ = list(zip(*dictionary))
    word_idx = dict(zip(words, xrange(len(words))))
    word_idx['<unk>'] = len(words)
    return word_idx


def reader_creator(pos_pattern, neg_pattern, word_idx, buffer_size):
    UNK = word_idx['<unk>']

    qs = [Queue.Queue(maxsize=buffer_size), Queue.Queue(maxsize=buffer_size)]

    def load(pattern, queue):
        for doc in tokenize(pattern):
            queue.put(doc)
        queue.put(None)  # Sentinel: this queue is exhausted.

    def reader():
        # Create two threads that load positive and negative samples
        # into qs.
        t0 = threading.Thread(target=load, args=(pos_pattern, qs[0]))
        t0.daemon = True
        t0.start()

        t1 = threading.Thread(target=load, args=(neg_pattern, qs[1]))
        t1.daemon = True
        t1.start()

        # Read alternately from qs[0] and qs[1].
        i = 0
        doc = qs[i].get()
        while doc is not None:
            yield [word_idx.get(w, UNK) for w in doc], i % 2
            i += 1
            doc = qs[i % 2].get()

        # Once one queue is exhausted, drain the other queue.
        i += 1
        doc = qs[i % 2].get()
        while doc is not None:
            yield [word_idx.get(w, UNK) for w in doc], i % 2
            doc = qs[i % 2].get()

    return reader()


def train(word_idx):
    return reader_creator(
        re.compile("aclImdb/train/pos/.*\.txt$"),
        re.compile("aclImdb/train/neg/.*\.txt$"), word_idx, 1000)


def test(word_idx):
    return reader_creator(
        re.compile("aclImdb/test/pos/.*\.txt$"),
        re.compile("aclImdb/test/neg/.*\.txt$"), word_idx, 1000)
Lines changed: 43 additions & 0 deletions
@@ -0,0 +1,43 @@
import paddle.v2.dataset.imdb
import unittest
import re

TRAIN_POS_PATTERN = re.compile("aclImdb/train/pos/.*\.txt$")
TRAIN_NEG_PATTERN = re.compile("aclImdb/train/neg/.*\.txt$")
TRAIN_PATTERN = re.compile("aclImdb/train/.*\.txt$")

TEST_POS_PATTERN = re.compile("aclImdb/test/pos/.*\.txt$")
TEST_NEG_PATTERN = re.compile("aclImdb/test/neg/.*\.txt$")
TEST_PATTERN = re.compile("aclImdb/test/.*\.txt$")


class TestIMDB(unittest.TestCase):
    word_idx = None

    def test_build_dict(self):
        if self.word_idx is None:
            self.word_idx = paddle.v2.dataset.imdb.build_dict(TRAIN_PATTERN,
                                                              150)

        self.assertEqual(len(self.word_idx), 7036)

    def check_dataset(self, dataset, expected_size):
        if self.word_idx is None:
            self.word_idx = paddle.v2.dataset.imdb.build_dict(TRAIN_PATTERN,
                                                              150)

        # Labels must strictly alternate between 0 and 1.
        count = 0
        for l in dataset(self.word_idx):
            self.assertEqual(l[1], count % 2)
            count += 1
        self.assertEqual(count, expected_size)

    def test_train(self):
        self.check_dataset(paddle.v2.dataset.imdb.train, 25000)

    def test_test(self):
        self.check_dataset(paddle.v2.dataset.imdb.test, 25000)


if __name__ == '__main__':
    unittest.main()
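A note on running these: because the file ends with unittest.main(), executing it directly runs all four tests. They download and scan the full IMDB tarball (via paddle.v2.dataset.common.download) on first use, so expect network access and a long runtime; the asserted sizes (7036 vocabulary entries at cutoff 150, and 25000 samples each for train and test) are properties of the aclImdb_v1 release.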
