Skip to content

Commit b3f650d

Browse files
authored
Merge pull request #10889 from kexinzhao/understand_sentiment_lod
Modify understand sentiment example using new LoDTensor API
2 parents d4c2164 + 8cce330 commit b3f650d

File tree

4 files changed

+60
-44
lines changed

4 files changed

+60
-44
lines changed

python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py

Lines changed: 15 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -121,17 +121,21 @@ def infer(use_cuda, inference_program, save_dirname=None):
121121
param_path=save_dirname,
122122
place=place)
123123

124-
def create_random_lodtensor(lod, place, low, high):
125-
data = np.random.random_integers(low, high,
126-
[lod[-1], 1]).astype("int64")
127-
res = fluid.LoDTensor()
128-
res.set(data, place)
129-
res.set_lod([lod])
130-
return res
131-
132-
lod = [0, 4, 10]
133-
tensor_words = create_random_lodtensor(
134-
lod, place, low=0, high=len(word_dict) - 1)
124+
# Setup input by creating LoDTensor to represent sequence of words.
125+
# Here each word is the basic element of the LoDTensor and the shape of
126+
# each word (base_shape) should be [1] since it is simply an index to
127+
# look up for the corresponding word vector.
128+
# Suppose the length-based level of detail (LoD) info is set to [[3, 4, 2]],
129+
# which has only one lod level. Then the created LoDTensor will have only
130+
# one higher level structure (sequence of words, or sentence) than the basic
131+
# element (word). Hence the LoDTensor will hold data for three sentences of
132+
# length 3, 4 and 2, respectively.
133+
# Note that lod info should be a list of lists.
134+
lod = [[3, 4, 2]]
135+
base_shape = [1]
136+
# The range of random integers is [low, high]
137+
tensor_words = fluid.create_random_int_lodtensor(
138+
lod, base_shape, place, low=0, high=len(word_dict) - 1)
135139
results = inferencer.infer({'words': tensor_words})
136140
print("infer results: ", results)
137141

python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py

Lines changed: 15 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -136,17 +136,21 @@ def infer(use_cuda, inference_program, save_dirname=None):
136136
param_path=save_dirname,
137137
place=place)
138138

139-
def create_random_lodtensor(lod, place, low, high):
140-
data = np.random.random_integers(low, high,
141-
[lod[-1], 1]).astype("int64")
142-
res = fluid.LoDTensor()
143-
res.set(data, place)
144-
res.set_lod([lod])
145-
return res
146-
147-
lod = [0, 4, 10]
148-
tensor_words = create_random_lodtensor(
149-
lod, place, low=0, high=len(word_dict) - 1)
139+
# Setup input by creating LoDTensor to represent sequence of words.
140+
# Here each word is the basic element of the LoDTensor and the shape of
141+
# each word (base_shape) should be [1] since it is simply an index to
142+
# look up for the corresponding word vector.
143+
# Suppose the length-based level of detail (LoD) info is set to [[3, 4, 2]],
144+
# which has only one lod level. Then the created LoDTensor will have only
145+
# one higher level structure (sequence of words, or sentence) than the basic
146+
# element (word). Hence the LoDTensor will hold data for three sentences of
147+
# length 3, 4 and 2, respectively.
148+
# Note that lod info should be a list of lists.
149+
lod = [[3, 4, 2]]
150+
base_shape = [1]
151+
# The range of random integers is [low, high]
152+
tensor_words = fluid.create_random_int_lodtensor(
153+
lod, base_shape, place, low=0, high=len(word_dict) - 1)
150154
results = inferencer.infer({'words': tensor_words})
151155
print("infer results: ", results)
152156

python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py

Lines changed: 15 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -128,17 +128,21 @@ def infer(use_cuda, inference_program, save_dirname=None):
128128
param_path=save_dirname,
129129
place=place)
130130

131-
def create_random_lodtensor(lod, place, low, high):
132-
data = np.random.random_integers(low, high,
133-
[lod[-1], 1]).astype("int64")
134-
res = fluid.LoDTensor()
135-
res.set(data, place)
136-
res.set_lod([lod])
137-
return res
138-
139-
lod = [0, 4, 10]
140-
tensor_words = create_random_lodtensor(
141-
lod, place, low=0, high=len(word_dict) - 1)
131+
# Setup input by creating LoDTensor to represent sequence of words.
132+
# Here each word is the basic element of the LoDTensor and the shape of
133+
# each word (base_shape) should be [1] since it is simply an index to
134+
# look up for the corresponding word vector.
135+
# Suppose the length-based level of detail (LoD) info is set to [[3, 4, 2]],
136+
# which has only one lod level. Then the created LoDTensor will have only
137+
# one higher level structure (sequence of words, or sentence) than the basic
138+
# element (word). Hence the LoDTensor will hold data for three sentences of
139+
# length 3, 4 and 2, respectively.
140+
# Note that lod info should be a list of lists.
141+
lod = [[3, 4, 2]]
142+
base_shape = [1]
143+
# The range of random integers is [low, high]
144+
tensor_words = fluid.create_random_int_lodtensor(
145+
lod, base_shape, place, low=0, high=len(word_dict) - 1)
142146
results = inferencer.infer({'words': tensor_words})
143147
print("infer results: ", results)
144148

python/paddle/fluid/tests/book/notest_understand_sentiment.py

Lines changed: 15 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -125,14 +125,6 @@ def stacked_lstm_net(data,
125125
return avg_cost, accuracy, prediction
126126

127127

128-
def create_random_lodtensor(lod, place, low, high):
129-
data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64")
130-
res = fluid.LoDTensor()
131-
res.set(data, place)
132-
res.set_lod([lod])
133-
return res
134-
135-
136128
def train(word_dict,
137129
net_method,
138130
use_cuda,
@@ -242,9 +234,21 @@ def infer(word_dict, use_cuda, save_dirname=None):
242234

243235
word_dict_len = len(word_dict)
244236

245-
lod = [0, 4, 10]
246-
tensor_words = create_random_lodtensor(
247-
lod, place, low=0, high=word_dict_len - 1)
237+
# Setup input by creating LoDTensor to represent sequence of words.
238+
# Here each word is the basic element of the LoDTensor and the shape of
239+
# each word (base_shape) should be [1] since it is simply an index to
240+
# look up for the corresponding word vector.
241+
# Suppose the length-based level of detail (LoD) info is set to [[3, 4, 2]],
242+
# which has only one lod level. Then the created LoDTensor will have only
243+
# one higher level structure (sequence of words, or sentence) than the basic
244+
# element (word). Hence the LoDTensor will hold data for three sentences of
245+
# length 3, 4 and 2, respectively.
246+
# Note that lod info should be a list of lists.
247+
lod = [[3, 4, 2]]
248+
base_shape = [1]
249+
# The range of random integers is [low, high]
250+
tensor_words = fluid.create_random_int_lodtensor(
251+
lod, base_shape, place, low=0, high=word_dict_len - 1)
248252

249253
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
250254
# and results will contain a list of data corresponding to fetch_targets.

0 commit comments

Comments
 (0)