Skip to content

Commit 6430b24

Browse files
committed
examples support TF1.3
1 parent be7bb90 commit 6430b24

File tree

2 files changed

+13
-12
lines changed

2 files changed

+13
-12
lines changed

example/tutorial_generate_text.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
"""Example of Synced sequence input and output.
2222
Generate text using LSTM.
2323
24-
Data: https://github.com/zsdonghao/tensorlayer/tree/master/example/data/trump
24+
Data: https://github.com/zsdonghao/tensorlayer/tree/master/example/data/
2525
2626
"""
2727

@@ -201,7 +201,7 @@ def main_lstm_generate_text():
201201
model_file_name = "model_generate_text.npz"
202202

203203
##===== Prepare Data
204-
words = customized_read_words(input_fpath="trump_text.txt")
204+
words = customized_read_words(input_fpath="data/trump/trump_text.txt")
205205

206206
vocab = tl.nlp.create_vocab([words], word_counts_output_file='vocab.txt', min_word_count=1)
207207
vocab = tl.nlp.Vocabulary('vocab.txt', unk_word="<UNK>")

example/tutorial_tfrecord3.py

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ def _bytes_feature_list(values):
6262
VOC_FIR = cwd + '/vocab.txt'
6363
# read image captions from JSON
6464
with tf.gfile.FastGFile(SEQ_FIR, "r") as f:
65-
caption_data = json.loads(str(f.read(), encoding = "utf-8"))
65+
caption_data = json.loads(str(f.read()))#, encoding = "utf-8"))
6666

6767
processed_capts, img_capts = [], []
6868
for idx in range(len(caption_data['images'])):
@@ -227,8 +227,8 @@ def distort_image(image, thread_id):
227227
# image_summary("final_image", image)
228228
#
229229
# # Rescale to [-1,1] instead of [0, 1]
230-
# image = tf.sub(image, 0.5)
231-
# image = tf.mul(image, 2.0)
230+
# image = tf.subtract(image, 0.5)
231+
# image = tf.multiply(image, 2.0)
232232
# return image
233233

234234
def prefetch_input_data(reader,
@@ -298,7 +298,8 @@ def prefetch_input_data(reader,
298298
enqueue_ops.append(values_queue.enqueue([value]))
299299
tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
300300
values_queue, enqueue_ops))
301-
tf.scalar_summary(
301+
302+
tf.summary.scalar(
302303
"queue/%s/fraction_of_%d_full" % (values_queue.name, capacity),
303304
tf.cast(values_queue.size(), tf.float32) * (1. / capacity))
304305

@@ -356,8 +357,8 @@ def prefetch_input_data(reader,
356357
if is_training:
357358
img = distort_image(img, thread_id=0)
358359
# Rescale to [-1, 1] instead of [0, 1]
359-
img = tf.sub(img, 0.5)
360-
img = tf.mul(img, 2.0)
360+
img = tf.subtract(img, 0.5)
361+
img = tf.multiply(img, 2.0)
361362
img_cap = sequence["image/caption"]
362363
img_cap_ids = sequence["image/caption_ids"]
363364
img_batch, img_cap_batch, img_cap_ids_batch = tf.train.batch([img, img_cap, img_cap_ids], # Note: shuffle_batch doesn't support dynamic_pad
@@ -443,7 +444,7 @@ def batch_with_dynamic_pad(images_and_captions,
443444
enqueue_list = []
444445
for image, caption in images_and_captions:
445446
caption_length = tf.shape(caption)[0]
446-
input_length = tf.expand_dims(tf.sub(caption_length, 1), 0)
447+
input_length = tf.expand_dims(tf.subtract(caption_length, 1), 0)
447448

448449
input_seq = tf.slice(caption, [0], input_length)
449450
target_seq = tf.slice(caption, [1], input_length)
@@ -459,9 +460,9 @@ def batch_with_dynamic_pad(images_and_captions,
459460

460461
if add_summaries:
461462
lengths = tf.add(tf.reduce_sum(mask, 1), 1)
462-
tf.scalar_summary("caption_length/batch_min", tf.reduce_min(lengths))
463-
tf.scalar_summary("caption_length/batch_max", tf.reduce_max(lengths))
464-
tf.scalar_summary("caption_length/batch_mean", tf.reduce_mean(lengths))
463+
tf.summary.scalar("caption_length/batch_min", tf.reduce_min(lengths))
464+
tf.summary.scalar("caption_length/batch_max", tf.reduce_max(lengths))
465+
tf.summary.scalar("caption_length/batch_mean", tf.reduce_mean(lengths))
465466

466467
return images, input_seqs, target_seqs, mask
467468

0 commit comments

Comments (0)