This repository was archived by the owner on Jan 2, 2021. It is now read-only.

Commit 9bba985

Improve pre-processing steps to include more randomness: scales, blur...
1 parent 149ae4e commit 9bba985

File tree: 1 file changed, +12 -9 lines changed


enhance.py

Lines changed: 12 additions & 9 deletions
@@ -41,6 +41,7 @@
 add_arg('--rendering-overlap', default=32, type=int, help='Number of pixels padding around each tile.')
 add_arg('--model', default='small', type=str, help='Name of the neural network to load/save.')
 add_arg('--train', default=False, type=str, help='File pattern to load for training.')
+add_arg('--train-scales', default=0, type=int, help='Randomly resize images this many times.')
 add_arg('--train-blur', default=None, type=int, help='Sigma value for gaussian blur preprocess.')
 add_arg('--train-noise', default=None, type=float, help='Radius for preprocessing gaussian blur.')
 add_arg('--train-jpeg', default=None, type=int, help='JPEG compression level in preprocessing.')
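Note: the new --train-scales switch joins the existing randomized-preprocessing flags above. A hypothetical training invocation exercising them together (the file pattern, model name, and flag values are illustrative placeholders, not taken from this commit) might look like:

    python3 enhance.py --train "train/*.jpg" --model custom --train-scales 2 --train-blur 4 --train-noise 10.0 --train-jpeg 70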
@@ -52,7 +53,7 @@
 add_arg('--buffer-size', default=1500, type=int, help='Total image fragments kept in cache.')
 add_arg('--buffer-similar', default=5, type=int, help='Fragments cached for each image loaded.')
 add_arg('--learning-rate', default=1E-4, type=float, help='Parameter for the ADAM optimizer.')
-add_arg('--learning-period', default=50, type=int, help='How often to decay the learning rate.')
+add_arg('--learning-period', default=75, type=int, help='How often to decay the learning rate.')
 add_arg('--learning-decay', default=0.5, type=float, help='How much to decay the learning rate.')
 add_arg('--generator-upscale', default=2, type=int, help='Steps of 2x up-sampling as post-process.')
 add_arg('--generator-downscale',default=0, type=int, help='Steps of 2x down-sampling as preprocess.')
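The --learning-period and --learning-decay pair describe a step decay schedule, and this hunk only stretches the step from 50 to 75 epochs; the decay logic itself is not part of the diff. A minimal sketch of that schedule under the plain reading of the help strings (step_decay and its arguments are illustrative names, not from enhance.py):

def step_decay(base_rate=1E-4, period=75, decay=0.5, epoch=0):
    """Learning rate after `epoch` epochs: multiply by `decay` every `period` epochs."""
    return base_rate * (decay ** (epoch // period))

# With the new default the rate first halves at epoch 75 instead of 50, then again at 150, and so on.
for epoch in (0, 74, 75, 150):
    print(epoch, step_decay(epoch=epoch))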
@@ -161,8 +162,9 @@ def add_to_buffer(self, f):
         filename = os.path.join(self.cwd, f)
         try:
             orig = PIL.Image.open(filename).convert('RGB')
-            # if all(s > args.batch_shape * 2 for s in orig.size):
-            #     orig = orig.resize((orig.size[0]//2, orig.size[1]//2), resample=PIL.Image.LANCZOS)
+            scale = 2 ** random.randint(0, args.train_scales)
+            if scale > 1 and all(s > args.batch_shape * scale for s in orig.size):
+                orig = orig.resize((orig.size[0]//scale, orig.size[1]//scale), resample=PIL.Image.LANCZOS)
             if any(s < args.batch_shape for s in orig.size):
                 raise ValueError('Image is too small for training with size {}'.format(orig.size))
         except Exception as e:
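The added lines replace the commented-out unconditional halving with a power-of-two scale drawn per image, so --train-scales 2 picks a factor of 1, 2, or 4 with equal probability, and the resize is skipped whenever the shrunken image could no longer supply a full batch_shape crop. A standalone sketch of just that step, with the image, train_scales, and batch_shape passed in explicitly instead of read from args (default values are illustrative):

import random
import PIL.Image

def random_downscale(orig, train_scales=2, batch_shape=192):
    """Shrink a PIL image by a randomly chosen power-of-two factor, mirroring the new code."""
    scale = 2 ** random.randint(0, train_scales)    # 1, 2, 4, ... with equal probability
    if scale > 1 and all(s > batch_shape * scale for s in orig.size):
        orig = orig.resize((orig.size[0] // scale, orig.size[1] // scale),
                           resample=PIL.Image.LANCZOS)
    return orig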
@@ -171,19 +173,20 @@ def add_to_buffer(self, f):
             self.files.remove(f)
             return

-        seed = orig.filter(PIL.ImageFilter.GaussianBlur(radius=args.train_blur)) if args.train_blur else orig
-        seed = seed.resize((orig.size[0]//args.zoom, orig.size[1]//args.zoom), resample=PIL.Image.LANCZOS)
-
+        if args.train_blur:
+            seed = orig.filter(PIL.ImageFilter.GaussianBlur(radius=random.randint(0, args.train_blur*2)))
+        if args.zoom > 1:
+            seed = seed.resize((orig.size[0]//args.zoom, orig.size[1]//args.zoom), resample=PIL.Image.LANCZOS)
         if args.train_jpeg:
             buffer = io.BytesIO()
             seed.save(buffer, format='jpeg', quality=args.train_jpeg+random.randrange(-15,+15))
             seed = PIL.Image.open(buffer)

+        orig = scipy.misc.fromimage(orig, mode='RGB').astype(np.float32)
         seed = scipy.misc.fromimage(seed, mode='RGB').astype(np.float32)
-        seed += scipy.random.normal(scale=args.train_noise, size=(seed.shape[0], seed.shape[1], 1)) \
-                    if args.train_noise else 0.0

-        orig = scipy.misc.fromimage(orig).astype(np.float32)
+        if args.train_noise:
+            seed += scipy.random.normal(scale=args.train_noise, size=(seed.shape[0], seed.shape[1], 1)) ** 4.0

         for _ in range(args.buffer_similar):
             h = random.randint(0, seed.shape[0] - self.seed_shape)
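Taken together, the rewritten block degrades the seed image with an optional random-radius blur, a zoom-factor downscale, a JPEG round trip at jittered quality, and additive noise, while orig is kept as the clean training target. A condensed sketch of that pipeline, using np.asarray in place of the long-deprecated scipy.misc.fromimage and initialising seed = orig up front so it runs even when blur and zoom are both disabled (function and parameter names are illustrative, not from enhance.py):

import io
import random
import numpy as np
import PIL.Image
import PIL.ImageFilter

def degrade(orig, zoom=2, train_blur=None, train_jpeg=None, train_noise=None):
    """Return (orig, seed) float32 arrays, with seed randomly degraded like the new preprocessing."""
    seed = orig
    if train_blur:                      # blur radius re-drawn for every image
        seed = orig.filter(PIL.ImageFilter.GaussianBlur(radius=random.randint(0, train_blur * 2)))
    if zoom > 1:                        # low-resolution version the network learns to upscale
        seed = seed.resize((orig.size[0] // zoom, orig.size[1] // zoom), resample=PIL.Image.LANCZOS)
    if train_jpeg:                      # round trip through JPEG at a jittered quality setting
        buffer = io.BytesIO()
        seed.save(buffer, format='jpeg', quality=train_jpeg + random.randrange(-15, +15))
        seed = PIL.Image.open(buffer)

    orig = np.asarray(orig, dtype=np.float32)
    seed = np.asarray(seed, dtype=np.float32)
    if train_noise:                     # gaussian noise raised to the 4th power, shared across channels
        seed = seed + np.random.normal(scale=train_noise, size=(seed.shape[0], seed.shape[1], 1)) ** 4.0
    return orig, seed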
