+import os
+import numpy as np
+import tensorflow as tf
+import tensorlayer as tl
+
+## enable debug logging
+tl.logging.set_verbosity(tl.logging.DEBUG)
+
+class FLAGS(object):
+    def __init__(self):
+        self.n_epoch = 25  # "Number of epochs to train [25]"
+        self.z_dim = 100  # "Dimension of the noise vector [100]"
+        self.learning_rate = 0.0002  # "Learning rate for adam [0.0002]"
+        self.beta1 = 0.5  # "Momentum term of adam [0.5]"
+        self.batch_size = 64  # "The number of batch images [64]"
+        self.output_size = 64  # "The size of the output images to produce [64]"
+        self.sample_size = 64  # "The number of sample images [64]"
+        self.c_dim = 3  # "Number of image channels [3]"
+        self.save_step = 500  # "The interval of saving checkpoints [500]"
+        # self.dataset = "celebA"  # "The name of dataset [celebA, mnist, lsun]"
+        self.checkpoint_dir = "checkpoint"  # "Directory name to save the checkpoints [checkpoint]"
+        self.sample_dir = "samples"  # "Directory name to save the image samples [samples]"
+        assert np.sqrt(self.sample_size) % 1 == 0., 'Flag `sample_size` needs to be a perfect square'
+
+flags = FLAGS()
+
+tl.files.exists_or_mkdir(flags.checkpoint_dir)  # directory for model checkpoints
+tl.files.exists_or_mkdir(flags.sample_dir)  # directory for generated image samples
+
+def get_celebA(output_size, n_epoch, batch_size):
+    # build the input pipeline with the tf.data API, with augmentation
+    images_path = tl.files.load_file_list(path='data', regx='.*.jpg', keep_prefix=True, printable=False)
+    def generator_train():
+        for image_path in images_path:
+            yield image_path.encode('utf-8')
+    def _map_fn(image_path):
+        image = tf.io.read_file(image_path)
+        image = tf.image.decode_jpeg(image, channels=3)  # uint8 RGB
+        image = tf.image.convert_image_dtype(image, dtype=tf.float32)  # scale to [0, 1]
+        # image = tf.image.crop_central(image, [FLAGS.output_size, FLAGS.output_size, FLAGS.c_dim])
+        # image = tf.image.resize_images(image, FLAGS.output_size)
+        image = image[45:173, 25:153, :]  # 128x128 face-centered crop of the 218x178 CelebA image
+        image = tf.image.resize([image], (output_size, output_size))[0]  # resize to output_size x output_size
+        # image = tf.image.crop_and_resize(image, boxes=[[]], crop_size=[64, 64])
+        # image = tf.image.resize_image_with_crop_or_pad(image, FLAGS.output_size, FLAGS.output_size)  # central crop
+        image = tf.image.random_flip_left_right(image)
+        image = image * 2 - 1  # rescale to [-1, 1]
+        return image
+    train_ds = tf.data.Dataset.from_generator(generator_train, output_types=tf.string)
+    ds = train_ds.shuffle(buffer_size=4096)
+    # ds = ds.shard(num_shards=hvd.size(), index=hvd.rank())
+    ds = ds.repeat(n_epoch)
+    ds = ds.map(_map_fn, num_parallel_calls=4)
+    ds = ds.batch(batch_size)
+    ds = ds.prefetch(buffer_size=2)
+    return ds, images_path
+    # for batch_images in train_ds:
+    #     print(batch_images.shape)
+    # value = ds.make_one_shot_iterator().get_next()
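
The dataset returned by `get_celebA` already repeats for `n_epoch` epochs and yields batches scaled to [-1, 1], so a training loop only has to iterate over it. A minimal consumption sketch (illustrative only, not part of this commit): `fake_images` stands for the output of a generator defined elsewhere, and the commented grid-saving step is why `sample_size` is asserted to be a perfect square.

    images, images_path = get_celebA(flags.output_size, flags.n_epoch, flags.batch_size)
    for step, batch_images in enumerate(images):
        # each batch: float32, shape [batch_size, output_size, output_size, c_dim], values in [-1, 1]
        # (the last batch per epoch may be smaller, since batch() is called without drop_remainder)
        z = np.random.normal(loc=0.0, scale=1.0, size=[flags.batch_size, flags.z_dim]).astype(np.float32)
        # feed `z` to the generator and `batch_images` to the discriminator here, then every
        # flags.save_step steps save a grid of sqrt(sample_size) x sqrt(sample_size) samples, e.g.:
        # num_tiles = int(np.sqrt(flags.sample_size))
        # tl.visualize.save_images(fake_images.numpy(), [num_tiles, num_tiles],
        #                          os.path.join(flags.sample_dir, 'train_{:04d}.png'.format(step)))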