From fb0db990f1f29df0e47459a57de99b09843eba19 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Thu, 28 Apr 2016 20:42:00 +0200 Subject: [PATCH 01/58] Experiments with new convolutional architectures. --- doodle.py | 64 +++++++++++++++++++++++++------------------------------ 1 file changed, 29 insertions(+), 35 deletions(-) diff --git a/doodle.py b/doodle.py index 3ff3819..5c79229 100755 --- a/doodle.py +++ b/doodle.py @@ -37,7 +37,7 @@ add_arg('--phases', default=3, type=int, help='Number of image scales to process in phases.') add_arg('--slices', default=2, type=int, help='Split patches up into this number of batches.') add_arg('--cache', default=0, type=int, help='Whether to compute matches only once.') -add_arg('--smoothness', default=1E+0, type=float, help='Weight of image smoothing scheme.') +add_arg('--smoothness', default=0E+0, type=float, help='Weight of image smoothing scheme.') add_arg('--variety', default=0.0, type=float, help='Bias toward selecting diverse patches, e.g. 0.5.') add_arg('--seed', default='noise', type=str, help='Seed image path, "noise" or "content".') add_arg('--seed-range', default='16:240', type=str, help='Random colors chosen in range, e.g. 0:255.') @@ -106,8 +106,6 @@ class Model(object): """ def __init__(self): - self.pixel_mean = np.array([103.939, 116.779, 123.680], dtype=np.float32).reshape((3,1,1)) - self.setup_model() self.load_data() @@ -118,32 +116,25 @@ def setup_model(self): net, self.channels = {}, {} # Primary network for the main image. These are convolution only, and stop at layer 4_2 (rest unused). 
- net['img'] = InputLayer((1, 3, None, None)) - net['conv1_1'] = ConvLayer(net['img'], 64, 3, pad=1) - net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1) - net['pool1'] = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad') - net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1) - net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1) - net['pool2'] = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad') - net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1) - net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1) - net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1) - net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1) - net['pool3'] = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad') - net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1) - net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1) - net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1) - net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1) - net['pool4'] = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad') - net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1) - net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1) - net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1) - net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1) - net['main'] = net['conv5_4'] + custom = {'nonlinearity': lasagne.nonlinearities.elu} + net['img'] = InputLayer((None, 3, None, None)) + net['conv1_1'] = ConvLayer(net['img'], 64, 5, pad=2, **custom) + net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, **custom) + net['conv1_3'] = ConvLayer(net['conv1_2'], 64, 4, pad=1, stride=(2,2), **custom) + net['conv2_1'] = ConvLayer(net['conv1_3'], 128, 3, pad=1, **custom) + net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, **custom) + net['conv2_3'] = ConvLayer(net['conv2_2'], 128, 4, pad=1, stride=(2,2), **custom) + net['conv3_1'] = ConvLayer(net['conv2_3'], 192, 3, pad=1, **custom) + net['conv3_2'] = ConvLayer(net['conv3_1'], 192, 3, pad=1, **custom) + 
net['conv3_3'] = ConvLayer(net['conv3_2'], 192, 4, pad=1, stride=(2,2), **custom) + net['conv4_1'] = ConvLayer(net['conv3_3'], 256, 3, pad=1, **custom) + net['conv4_2'] = ConvLayer(net['conv4_1'], 256, 3, pad=1, **custom) + net['conv4_3'] = ConvLayer(net['conv4_2'], 256, 4, pad=1, stride=(2,2), **custom) + net['main'] = net['conv4_3'] # Auxiliary network for the semantic layers, and the nearest neighbors calculations. net['map'] = InputLayer((1, 1, None, None)) - for j, i in itertools.product(range(5), range(4)): + for j, i in itertools.product(range(4), range(3)): if j < 2 and i > 1: continue suffix = '%i_%i' % (j+1, i+1) @@ -164,14 +155,17 @@ def setup_model(self): def load_data(self): """Open the serialized parameters from a pre-trained network, and load them into the model created. """ - vgg19_file = os.path.join(os.path.dirname(__file__), 'vgg19_conv.pkl.bz2') - if not os.path.exists(vgg19_file): + data_file = os.path.join(os.path.dirname(__file__), 'wgg_conv.pkl.bz2') + if not os.path.exists(data_file): error("Model file with pre-trained convolution layers not found. Download here...", "https://github.com/alexjc/neural-doodle/releases/download/v0.0/vgg19_conv.pkl.bz2") - data = pickle.load(bz2.open(vgg19_file, 'rb')) - params = lasagne.layers.get_all_param_values(self.network['main']) - lasagne.layers.set_all_param_values(self.network['main'], data[:len(params)]) + data = pickle.load(bz2.open(data_file, 'rb')) + for layer, values in data.items(): + layer = layer.replace('enc', 'conv') + if layer not in self.network: continue + for p, v in zip(self.network[layer].get_params(), values): + p.set_value(v) def setup(self, layers): """Setup the inputs and outputs, knowing the layers that are required by the optimization algorithm. @@ -193,7 +187,7 @@ def prepare_image(self, image): the resolution. 
""" image = np.swapaxes(np.swapaxes(image, 1, 2), 0, 1)[::-1, :, :] - image = image.astype(np.float32) - self.pixel_mean + image = image.astype(np.float32) / 127.5 - 1.0 return image[np.newaxis] def finalize_image(self, image, resolution): @@ -514,7 +508,7 @@ def evaluate(self, Xn): """Callback for the L-BFGS optimization that computes the loss and gradients on the GPU. """ # Adjust the representation to be compatible with the model before computing results. - current_img = Xn.reshape(self.content_img.shape).astype(np.float32) - self.model.pixel_mean + current_img = Xn.reshape(self.content_img.shape).astype(np.float32) / 127.5 - 1.0 current_features = self.compute_features(current_img, self.content_map) # Iterate through each of the style layers one by one, computing best matches. @@ -548,7 +542,7 @@ def evaluate(self, Xn): # Print more information to the console every few iterations. if args.print_every and self.frame % args.print_every == 0: - print('{:>3} {}loss{} {:8.2e} '.format(self.frame, ansi.BOLD, ansi.ENDC, loss / 1000.0), end='') + print('{:>3} {}loss{} {:8.2e} '.format(self.frame, ansi.BOLD, ansi.ENDC, loss / 1.0), end='') category = '' for v, l in zip(losses, self.losses): if l[0] == 'smooth': @@ -556,7 +550,7 @@ def evaluate(self, Xn): if l[0] != category: print(' {}{}{}'.format(ansi.BOLD, l[0], ansi.ENDC), end='') category = l[0] - print(' {}{}{} {:8.2e} '.format(ansi.BOLD, l[1], ansi.ENDC, v / 1000.0), end='') + print(' {}{}{} {:8.2e} '.format(ansi.BOLD, l[1], ansi.ENDC, v / 1.0), end='') current_time = time.time() quality = 100.0 - 100.0 * np.sqrt(self.error / 255.0) From 270524d6f0d85fbeec53b17b72b4c362e5d98a72 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Mon, 2 May 2016 20:27:07 +0200 Subject: [PATCH 02/58] Integrated new model, works but doesn't have enough capacity? 
--- doodle.py | 41 +++++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/doodle.py b/doodle.py index 5c79229..dcbea92 100755 --- a/doodle.py +++ b/doodle.py @@ -25,11 +25,11 @@ add_arg = parser.add_argument add_arg('--content', default=None, type=str, help='Content image path as optimization target.') -add_arg('--content-weight', default=10.0, type=float, help='Weight of content relative to style.') -add_arg('--content-layers', default='4_2', type=str, help='The layer with which to match content.') +add_arg('--content-weight', default=2500.0, type=float, help='Weight of content relative to style.') +add_arg('--content-layers', default='4_1', type=str, help='The layer with which to match content.') add_arg('--style', default=None, type=str, help='Style image path to extract patches.') -add_arg('--style-weight', default=25.0, type=float, help='Weight of style relative to content.') -add_arg('--style-layers', default='3_1,4_1', type=str, help='The layers to match style patches.') +add_arg('--style-weight', default=2500.0, type=float, help='Weight of style relative to content.') +add_arg('--style-layers', default='4_1,3_1', type=str, help='The layers to match style patches.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=10.0, type=float, help='Global weight of semantics vs. features.') add_arg('--output', default='output.png', type=str, help='Output image path to save once done.') @@ -118,24 +118,20 @@ def setup_model(self): # Primary network for the main image. These are convolution only, and stop at layer 4_2 (rest unused). 
custom = {'nonlinearity': lasagne.nonlinearities.elu} net['img'] = InputLayer((None, 3, None, None)) - net['conv1_1'] = ConvLayer(net['img'], 64, 5, pad=2, **custom) - net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, **custom) - net['conv1_3'] = ConvLayer(net['conv1_2'], 64, 4, pad=1, stride=(2,2), **custom) - net['conv2_1'] = ConvLayer(net['conv1_3'], 128, 3, pad=1, **custom) - net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, **custom) - net['conv2_3'] = ConvLayer(net['conv2_2'], 128, 4, pad=1, stride=(2,2), **custom) - net['conv3_1'] = ConvLayer(net['conv2_3'], 192, 3, pad=1, **custom) - net['conv3_2'] = ConvLayer(net['conv3_1'], 192, 3, pad=1, **custom) - net['conv3_3'] = ConvLayer(net['conv3_2'], 192, 4, pad=1, stride=(2,2), **custom) - net['conv4_1'] = ConvLayer(net['conv3_3'], 256, 3, pad=1, **custom) - net['conv4_2'] = ConvLayer(net['conv4_1'], 256, 3, pad=1, **custom) - net['conv4_3'] = ConvLayer(net['conv4_2'], 256, 4, pad=1, stride=(2,2), **custom) - net['main'] = net['conv4_3'] + net['conv1_1'] = ConvLayer(net['img'], 48, 3, pad=1, **custom) + net['conv1_2'] = ConvLayer(net['conv1_1'], 48, 3, pad=1, **custom) + net['conv2_1'] = ConvLayer(net['conv1_2'], 72, 2, pad=0, stride=(2,2), **custom) + net['conv2_2'] = ConvLayer(net['conv2_1'], 72, 3, pad=1, **custom) + net['conv3_1'] = ConvLayer(net['conv2_2'], 96, 2, pad=0, stride=(2,2), **custom) + net['conv3_2'] = ConvLayer(net['conv3_1'], 96, 3, pad=1, **custom) + net['conv3_3'] = ConvLayer(net['conv3_2'], 96, 3, pad=1, **custom) + net['conv4_1'] = ConvLayer(net['conv3_2'], 120, 2, pad=0, stride=(2,2), **custom) # Auxiliary network for the semantic layers, and the nearest neighbors calculations. 
net['map'] = InputLayer((1, 1, None, None)) for j, i in itertools.product(range(4), range(3)): - if j < 2 and i > 1: continue + if j < 3 and i > 1: continue + if j == 3 and i > 0: continue suffix = '%i_%i' % (j+1, i+1) if i == 0: @@ -155,7 +151,7 @@ def setup_model(self): def load_data(self): """Open the serialized parameters from a pre-trained network, and load them into the model created. """ - data_file = os.path.join(os.path.dirname(__file__), 'wgg_conv.pkl.bz2') + data_file = os.path.join(os.path.dirname(__file__), 'ngg_conv.pkl.bz2') if not os.path.exists(data_file): error("Model file with pre-trained convolution layers not found. Download here...", "https://github.com/alexjc/neural-doodle/releases/download/v0.0/vgg19_conv.pkl.bz2") @@ -165,6 +161,7 @@ def load_data(self): layer = layer.replace('enc', 'conv') if layer not in self.network: continue for p, v in zip(self.network[layer].get_params(), values): + assert p.get_value().shape == v.shape, "Layer {}: Expected size {}.".format(layer, v.shape) p.set_value(v) def setup(self, layers): @@ -542,7 +539,7 @@ def evaluate(self, Xn): # Print more information to the console every few iterations. if args.print_every and self.frame % args.print_every == 0: - print('{:>3} {}loss{} {:8.2e} '.format(self.frame, ansi.BOLD, ansi.ENDC, loss / 1.0), end='') + print('{:>3} {}loss{} {:8.2e} '.format(self.frame, ansi.BOLD, ansi.ENDC, float(loss)), end='') category = '' for v, l in zip(losses, self.losses): if l[0] == 'smooth': @@ -550,7 +547,7 @@ def evaluate(self, Xn): if l[0] != category: print(' {}{}{}'.format(ansi.BOLD, l[0], ansi.ENDC), end='') category = l[0] - print(' {}{}{} {:8.2e} '.format(ansi.BOLD, l[1], ansi.ENDC, v / 1.0), end='') + print(' {}{}{} {:8.2e} '.format(ansi.BOLD, l[1], ansi.ENDC, float(v)), end='') current_time = time.time() quality = 100.0 - 100.0 * np.sqrt(self.error / 255.0) @@ -590,7 +587,7 @@ def run(self): # Setup the seed for the optimization as specified by the user. 
shape = self.content_img.shape[2:] if args.seed == 'content': - Xn = self.content_img[0] + self.model.pixel_mean + Xn = (self.content_img[0] + 1.0) * 127.5 if args.seed == 'noise': bounds = [int(i) for i in args.seed_range.split(':')] Xn = np.random.uniform(bounds[0], bounds[1], shape + (3,)).astype(np.float32) From 1bcbf2c076266ad6b5e78f45c994f90c69f603c5 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Wed, 4 May 2016 20:30:40 +0200 Subject: [PATCH 03/58] Prototype of feed-forward architecture. --- doodle.py | 164 +++++++++++++++++++++++------------------------------- 1 file changed, 70 insertions(+), 94 deletions(-) diff --git a/doodle.py b/doodle.py index 033c0fc..f262d90 100755 --- a/doodle.py +++ b/doodle.py @@ -29,7 +29,7 @@ add_arg('--content-layers', default='4_1', type=str, help='The layer with which to match content.') add_arg('--style', default=None, type=str, help='Style image path to extract patches.') add_arg('--style-weight', default=2500.0, type=float, help='Weight of style relative to content.') -add_arg('--style-layers', default='4_1,3_1', type=str, help='The layers to match style patches.') +add_arg('--style-layers', default='4_1', type=str, help='The layers to match style patches.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=10.0, type=float, help='Global weight of semantics vs. features.') add_arg('--output', default='output.png', type=str, help='Output image path to save once done.') @@ -39,7 +39,7 @@ add_arg('--cache', default=0, type=int, help='Whether to compute matches only once.') add_arg('--smoothness', default=0E+0, type=float, help='Weight of image smoothing scheme.') add_arg('--variety', default=0.0, type=float, help='Bias toward selecting diverse patches, e.g. 
0.5.') -add_arg('--seed', default='noise', type=str, help='Seed image path, "noise" or "content".') +add_arg('--seed', default='content', type=str, help='Seed image path, "noise" or "content".') add_arg('--seed-range', default='16:240', type=str, help='Random colors chosen in range, e.g. 0:255.') add_arg('--iterations', default=100, type=int, help='Number of iterations to run each resolution.') add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') @@ -92,7 +92,7 @@ def error(message, *lines): # Deep Learning Framework import lasagne -from lasagne.layers import Conv2DLayer as ConvLayer, Pool2DLayer as PoolLayer +from lasagne.layers import Conv2DLayer as ConvLayer, Deconv2DLayer as DeconvLayer, Pool2DLayer as PoolLayer from lasagne.layers import InputLayer, ConcatLayer print('{} - Using device `{}` for processing the images.{}'.format(ansi.CYAN, theano.config.device, ansi.ENDC)) @@ -115,33 +115,50 @@ def setup_model(self, input=None): """ net, self.channels = {}, {} - # Primary network for the main image. These are convolution only, and stop at layer 4_2 (rest unused). 
+ def DecvLayer(copy, input, channels, **args): + return DeconvLayer(input, channels, copy.filter_size, stride=copy.stride, crop=copy.pad, + nonlinearity=args.get('nonlinearity', lasagne.nonlinearities.elu)) + custom = {'nonlinearity': lasagne.nonlinearities.elu} - net['img'] = InputLayer((None, 3, None, None)) - net['conv1_1'] = ConvLayer(net['img'], 48, 3, pad=1, **custom) - net['conv1_2'] = ConvLayer(net['conv1_1'], 48, 3, pad=1, **custom) - net['conv2_1'] = ConvLayer(net['conv1_2'], 72, 2, pad=0, stride=(2,2), **custom) - net['conv2_2'] = ConvLayer(net['conv2_1'], 72, 3, pad=1, **custom) - net['conv3_1'] = ConvLayer(net['conv2_2'], 96, 2, pad=0, stride=(2,2), **custom) - net['conv3_2'] = ConvLayer(net['conv3_1'], 96, 3, pad=1, **custom) - net['conv3_3'] = ConvLayer(net['conv3_2'], 96, 3, pad=1, **custom) - net['conv4_1'] = ConvLayer(net['conv3_2'], 120, 2, pad=0, stride=(2,2), **custom) + # Encoder part of the neural network, takes an input image and turns it into abstract patterns. + net['img'] = input or InputLayer((1, 3, None, None)) + net['enc1_1'] = ConvLayer(net['img'], 48, 3, pad=1, **custom) + net['enc1_2'] = ConvLayer(net['enc1_1'], 48, 3, pad=1, **custom) + net['enc2_1'] = ConvLayer(net['enc1_2'], 96, 2, pad=0, stride=(2,2), **custom) + net['enc2_2'] = ConvLayer(net['enc2_1'], 96, 3, pad=1, **custom) + net['enc3_1'] = ConvLayer(net['enc2_2'], 192, 2, pad=0, stride=(2,2), **custom) + net['enc3_2'] = ConvLayer(net['enc3_1'], 192, 3, pad=1, **custom) + net['enc3_3'] = ConvLayer(net['enc3_2'], 192, 3, pad=1, **custom) + net['enc4_1'] = ConvLayer(net['enc3_3'], 384, 2, pad=0, stride=(2,2), **custom) + + # Decoder part of the neural network, takes abstract patterns and converts them into an image! 
+ self.tensor_middle = T.tensor4() + net['mid'] = InputLayer((1, 384, None, None), var=self.tensor_middle) + net['dec4_1'] = DecvLayer(net['enc4_1'], net['mid'], 192) + net['dec3_3'] = DecvLayer(net['enc3_3'], net['dec4_1'], 192) + net['dec3_2'] = DecvLayer(net['enc3_2'], net['dec3_3'], 192) + net['dec3_1'] = DecvLayer(net['enc3_1'], net['dec3_2'], 96) + net['dec2_2'] = DecvLayer(net['enc2_2'], net['dec3_1'], 96) + net['dec2_1'] = DecvLayer(net['enc2_1'], net['dec2_2'], 48) + net['dec1_2'] = DecvLayer(net['enc1_2'], net['dec2_1'], 48) + net['dec1_1'] = DecvLayer(net['enc1_1'], net['dec1_2'], 3, nonlinearity=lasagne.nonlinearities.tanh) + net['dec0_0'] = lasagne.layers.ScaleLayer(net['dec1_1']) + net['out'] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) # Auxiliary network for the semantic layers, and the nearest neighbors calculations. net['map'] = InputLayer((1, 1, None, None)) for j, i in itertools.product(range(4), range(3)): - if j < 3 and i > 1: continue - if j == 3 and i > 0: continue suffix = '%i_%i' % (j+1, i+1) + if 'enc'+suffix not in net: continue if i == 0: net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad') - self.channels[suffix] = net['conv'+suffix].num_filters + self.channels[suffix] = net['enc'+suffix].num_filters if args.semantic_weight > 0.0: - net['sem'+suffix] = ConcatLayer([net['conv'+suffix], net['map%i'%(j+1)]]) + net['sem'+suffix] = ConcatLayer([net['enc'+suffix], net['map%i'%(j+1)]]) else: - net['sem'+suffix] = net['conv'+suffix] + net['sem'+suffix] = net['enc'+suffix] net['dup'+suffix] = InputLayer(net['sem'+suffix].output_shape) net['nn'+suffix] = ConvLayer(net['dup'+suffix], 1, 3, b=None, pad=0, flip_filters=False) @@ -151,17 +168,18 @@ def setup_model(self, input=None): def load_data(self): """Open the serialized parameters from a pre-trained network, and load them into the model created. 
""" - data_file = os.path.join(os.path.dirname(__file__), 'ngg_conv.pkl.bz2') + data_file = os.path.join(os.path.dirname(__file__), 'bgg_conv.pkl.bz2') if not os.path.exists(data_file): error("Model file with pre-trained convolution layers not found. Download here...", "https://github.com/alexjc/neural-doodle/releases/download/v0.0/vgg19_conv.pkl.bz2") data = pickle.load(bz2.open(data_file, 'rb')) for layer, values in data.items(): - layer = layer.replace('enc', 'conv') if layer not in self.network: continue + # assert layer in self.network, "Layer `{}` not found as expected.".format(layer) for p, v in zip(self.network[layer].get_params(), values): - assert p.get_value().shape == v.shape, "Layer {}: Expected size {}.".format(layer, v.shape) + assert p.get_value().shape == v.shape, "Layer `{}` in network has size {} but data is {}."\ + .format(layer, v.shape, p.get_value().shape) p.set_value(v) def setup(self, layers): @@ -293,7 +311,7 @@ def load_images(self, name, filename): def compile(self, arguments, function): """Build a Theano function that will run the specified expression on the GPU. """ - return theano.function(list(arguments), function, on_unused_input='ignore') + return theano.function(list(arguments), function, on_unused_input='ignore', allow_input_downcast=True) def compute_norms(self, backend, layer, array): ni = backend.sqrt(backend.sum(array[:,:self.model.channels[layer]] ** 2.0, axis=(1,), keepdims=True)) @@ -362,9 +380,8 @@ def prepare_optimization(self): self.model.get_outputs('sem', self.style_layers)) # Patch matching calculation that uses only pre-calculated features and a slice of the patches. 
- self.matcher_tensors = {l: lasagne.utils.shared_empty(dim=4) for l in self.style_layers} - self.matcher_history = {l: T.vector() for l in self.style_layers} + self.matcher_history = {l: T.vector() for l in self.style_layers} self.matcher_inputs = {self.model.network['dup'+l]: self.matcher_tensors[l] for l in self.style_layers} nn_layers = [self.model.network['nn'+l] for l in self.style_layers] self.matcher_outputs = dict(zip(self.style_layers, lasagne.layers.get_output(nn_layers, self.matcher_inputs))) @@ -372,15 +389,8 @@ def prepare_optimization(self): self.compute_matches = {l: self.compile([self.matcher_history[l]], self.do_match_patches(l))\ for l in self.style_layers} - self.tensor_matches = [T.tensor4() for l in self.style_layers] - # Build a list of Theano expressions that, once summed up, compute the total error. - self.losses = self.content_loss() + self.total_variation_loss() + self.style_loss() - # Let Theano automatically compute the gradient of the error, used by LBFGS to update image pixels. - grad = T.grad(sum([l[-1] for l in self.losses]), self.model.tensor_img) - # Create a single function that returns the gradient and the individual errors components. - self.compute_grad_and_losses = theano.function( - [self.model.tensor_img, self.model.tensor_map] + self.tensor_matches, - [grad] + [l[-1] for l in self.losses], on_unused_input='ignore') + output = lasagne.layers.get_output(self.model.network['out'], {self.model.network['mid']: self.model.tensor_middle}) + self.compute_output = self.compile([self.model.tensor_middle], output) #------------------------------------------------------------------------------------------------------------------ @@ -430,12 +440,12 @@ def content_loss(self): return content_loss # First extract all the features we need from the model, these results after convolution. 
- extractor = theano.function([self.model.tensor_img], self.model.get_outputs('conv', self.content_layers)) + extractor = theano.function([self.model.tensor_img], self.model.get_outputs('enc', self.content_layers)) result = extractor(self.content_img) # Build a list of loss components that compute the mean squared error by comparing current result to desired. for l, ref in zip(self.content_layers, result): - layer = self.model.tensor_outputs['conv'+l] + layer = self.model.tensor_outputs['enc'+l] loss = T.mean((layer - ref) ** 2.0) content_loss.append(('content', l, args.content_weight * loss)) print(' - Content layer conv{}: {} features in {:,}kb.'.format(l, ref.shape[1], ref.size//1000)) @@ -450,7 +460,7 @@ def style_loss(self): return style_loss # Extract the patches from the current image, as well as their magnitude. - result = self.do_extract_patches(zip(self.style_layers, self.model.get_outputs('conv', self.style_layers))) + result = self.do_extract_patches(zip(self.style_layers, self.model.get_outputs('enc', self.style_layers))) # Multiple style layers are optimized separately, usually conv3_1 and conv4_1 — semantic data not used here. for l, matches, patches in zip(self.style_layers, self.tensor_matches, result[0::3]): @@ -517,57 +527,33 @@ def evaluate(self, Xn): # Iterate through each of the style layers one by one, computing best matches. current_best = [] - for l, f in zip(self.style_layers, current_features): + for l, ff in zip(self.style_layers, current_features): + f = np.copy(ff) self.normalize_components(l, f, self.compute_norms(np, l, f)) self.matcher_tensors[l].set_value(f) # Compute best matching patches this style layer, going through all slices. 
- warmup = bool(args.variety > 0.0 and self.iteration == 0) + warmup = bool(args.variety > 0.0) for _ in range(2 if warmup else 1): best_idx = self.evaluate_slices(f, l) patches = self.style_data[l][0] current_best.append(patches[best_idx].astype(np.float32)) - grads, *losses = self.compute_grad_and_losses(current_img, self.content_map, *current_best) - if np.isnan(grads).any(): + from sklearn.feature_extraction.image import reconstruct_from_patches_2d + + channels = self.model.channels[self.style_layers[-1]] + better_patches = current_best[-1][:,:channels].transpose((0, 2, 3, 1)) + better_shape = current_features[-1].shape[2:] + (channels,) + better_features = reconstruct_from_patches_2d(better_patches, better_shape) + + f = better_features.transpose((2, 0, 1))[np.newaxis] # current_features[0][:,:channels] + output = self.compute_output(f) + + if np.isnan(output).any(): raise OverflowError("Optimization diverged; try using a different device or parameters.") - # Use magnitude of gradients as an estimate for overall quality. - self.error = self.error * 0.9 + 0.1 * min(np.abs(grads).max(), 255.0) - loss = sum(losses) - - # Dump the image to disk if requested by the user. - if args.save_every and self.frame % args.save_every == 0: - frame = Xn.reshape(self.content_img.shape[1:]) - resolution = self.content_img_original.shape - image = scipy.misc.toimage(self.model.finalize_image(frame, resolution), cmin=0, cmax=255) - image.save('frames/%04d.png'%self.frame) - - # Print more information to the console every few iterations. 
- if args.print_every and self.frame % args.print_every == 0: - print('{:>3} {}loss{} {:8.2e} '.format(self.frame, ansi.BOLD, ansi.ENDC, float(loss)), end='') - category = '' - for v, l in zip(losses, self.losses): - if l[0] == 'smooth': - continue - if l[0] != category: - print(' {}{}{}'.format(ansi.BOLD, l[0], ansi.ENDC), end='') - category = l[0] - print(' {}{}{} {:8.2e} '.format(ansi.BOLD, l[1], ansi.ENDC, float(v)), end='') - - current_time = time.time() - quality = 100.0 - 100.0 * np.sqrt(self.error / 255.0) - print(' {}quality{} {: >4.1f}% '.format(ansi.BOLD, ansi.ENDC, quality), end='') - print(' {}time{} {:3.1f}s '.format(ansi.BOLD, ansi.ENDC, current_time - self.iter_time), flush=True) - self.iter_time = current_time - - # Update counters and timers. - self.frame += 1 - self.iteration += 1 - - # Return the data in the right format for L-BFGS. - return loss, np.array(grads).flatten().astype(np.float64) + return output def run(self): """The main entry point for the application, runs through multiple phases at increasing resolutions. @@ -582,26 +568,30 @@ def run(self): .format(ansi.BLUE_B, i, int(shape[1]*scale), int(shape[0]*scale), scale, ansi.BLUE)) # Precompute all necessary data for the various layers, put patches in place into augmented network. - self.model.setup(layers=['sem'+l for l in self.style_layers] + ['conv'+l for l in self.content_layers]) + self.model.setup(layers=['sem'+l for l in self.style_layers] + ['enc'+l for l in self.content_layers]) self.prepare_content(scale) self.prepare_style(scale) # Now setup the model with the new data, ready for the optimization loop. - self.model.setup(layers=['sem'+l for l in self.style_layers] + ['conv'+l for l in self.used_layers]) + self.model.setup(layers=['out'] + ['sem'+l for l in self.style_layers] + ['enc'+l for l in self.used_layers]) self.prepare_optimization() print('{}'.format(ansi.ENDC)) # Setup the seed for the optimization as specified by the user. 
shape = self.content_img.shape[2:] if args.seed == 'content': + print(self.content_img[0].min(), self.content_img[0].max()) Xn = (self.content_img[0] + 1.0) * 127.5 if args.seed == 'noise': + assert False bounds = [int(i) for i in args.seed_range.split(':')] Xn = np.random.uniform(bounds[0], bounds[1], shape + (3,)).astype(np.float32) if args.seed == 'previous': + assert False Xn = scipy.misc.imresize(Xn[0], shape, interp='bicubic') Xn = Xn.transpose((2, 0, 1))[np.newaxis] if os.path.exists(args.seed): + assert False seed_image = scipy.ndimage.imread(args.seed, mode='RGB') seed_image = scipy.misc.imresize(seed_image, shape, interp='bicubic') self.seed_image = self.model.prepare_image(seed_image) @@ -614,22 +604,7 @@ def run(self): data_bounds = np.zeros((np.product(Xn.shape), 2), dtype=np.float64) data_bounds[:] = (0.0, 255.0) - self.iter_time, self.iteration, interrupt = time.time(), 0, False - try: - Xn, Vn, info = scipy.optimize.fmin_l_bfgs_b( - self.evaluate, - Xn.astype(np.float64).flatten(), - bounds=data_bounds, - factr=0.0, pgtol=0.0, # Disable automatic termination, set low threshold. - m=5, # Maximum correlations kept in memory by algorithm. - maxfun=args.iterations-1, # Limit number of calls to evaluate(). - iprint=-1) # Handle our own logging of information. 
- except OverflowError: - error("The optimization diverged and NaNs were encountered.", - " - Try using a different `--device` or change the parameters.", - " - Make sure libraries are updated to work around platform bugs.") - except KeyboardInterrupt: - interrupt = True + Xn = self.evaluate(Xn) args.seed = 'previous' resolution = self.content_img.shape @@ -637,8 +612,9 @@ def run(self): output = self.model.finalize_image(Xn[0], self.content_img_original.shape) scipy.misc.toimage(output, cmin=0, cmax=255).save(args.output) - if interrupt: break + break + interrupt = False status = "finished in" if not interrupt else "interrupted at" print('\n{}Optimization {} {:3.1f}s, average pixel error {:3.1f}!{}\n'\ .format(ansi.CYAN, status, time.time() - self.start_time, self.error, ansi.ENDC)) From ffb7211982d6b30572ce9c6541082d8525f9ee6b Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sat, 7 May 2016 23:53:14 +0200 Subject: [PATCH 04/58] Re-introducing support for multiple phases, multiple iterations. --- doodle.py | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/doodle.py b/doodle.py index f262d90..10fbd64 100755 --- a/doodle.py +++ b/doodle.py @@ -43,8 +43,8 @@ add_arg('--seed-range', default='16:240', type=str, help='Random colors chosen in range, e.g. 
0:255.') add_arg('--iterations', default=100, type=int, help='Number of iterations to run each resolution.') add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') -add_arg('--print-every', default=10, type=int, help='How often to log statistics to stdout.') -add_arg('--save-every', default=10, type=int, help='How frequently to save PNG into `frames`.') +add_arg('--print-every', default=1, type=int, help='How often to log statistics to stdout.') +add_arg('--save-every', default=1, type=int, help='How frequently to save PNG into `frames`.') args = parser.parse_args() @@ -553,6 +553,26 @@ def evaluate(self, Xn): if np.isnan(output).any(): raise OverflowError("Optimization diverged; try using a different device or parameters.") + # Dump the image to disk if requested by the user. + if args.save_every and self.frame % args.save_every == 0: + frame = Xn.reshape(self.content_img.shape[1:]) + resolution = self.content_img_original.shape + image = scipy.misc.toimage(self.model.finalize_image(frame, resolution), cmin=0, cmax=255) + image.save('frames/%04d.png'%self.frame) + + # Print more information to the console every few iterations. + if args.print_every and self.frame % args.print_every == 0: + print('{:>3} '.format(self.frame, ansi.BOLD, ansi.ENDC), end='') + + current_time = time.time() + quality = 100.0 - 100.0 * np.sqrt(self.error / 255.0) + print(' {}time{} {:3.1f}s '.format(ansi.BOLD, ansi.ENDC, current_time - self.iter_time), flush=True) + self.iter_time = current_time + + # Update counters and timers. + self.frame += 1 + self.iteration += 1 + return output def run(self): @@ -580,22 +600,18 @@ def run(self): # Setup the seed for the optimization as specified by the user. 
shape = self.content_img.shape[2:] if args.seed == 'content': - print(self.content_img[0].min(), self.content_img[0].max()) Xn = (self.content_img[0] + 1.0) * 127.5 if args.seed == 'noise': - assert False bounds = [int(i) for i in args.seed_range.split(':')] Xn = np.random.uniform(bounds[0], bounds[1], shape + (3,)).astype(np.float32) if args.seed == 'previous': - assert False Xn = scipy.misc.imresize(Xn[0], shape, interp='bicubic') Xn = Xn.transpose((2, 0, 1))[np.newaxis] if os.path.exists(args.seed): - assert False seed_image = scipy.ndimage.imread(args.seed, mode='RGB') seed_image = scipy.misc.imresize(seed_image, shape, interp='bicubic') self.seed_image = self.model.prepare_image(seed_image) - Xn = self.seed_image[0] + self.model.pixel_mean + Xn = (self.seed_image[0] + 1.0) * 127.5 if Xn is None: error("Seed for optimization was not found. You can either...", " - Set the `--seed` to `content` or `noise`.", " - Specify `--seed` as a valid filename.") @@ -604,7 +620,9 @@ def run(self): data_bounds = np.zeros((np.product(Xn.shape), 2), dtype=np.float64) data_bounds[:] = (0.0, 255.0) - Xn = self.evaluate(Xn) + self.iter_time, self.iteration, interrupt = time.time(), 0, False + for _ in range(args.iterations): + Xn = self.evaluate(Xn) args.seed = 'previous' resolution = self.content_img.shape @@ -612,7 +630,6 @@ def run(self): output = self.model.finalize_image(Xn[0], self.content_img_original.shape) scipy.misc.toimage(output, cmin=0, cmax=255).save(args.output) - break interrupt = False status = "finished in" if not interrupt else "interrupted at" From 54070f9debe511d0daf66ca729fb489aac0239b2 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sun, 8 May 2016 00:23:11 +0200 Subject: [PATCH 05/58] Better default options. 2 phases, 2 iterations. 
--- doodle.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doodle.py b/doodle.py index 10fbd64..36ee974 100755 --- a/doodle.py +++ b/doodle.py @@ -25,26 +25,26 @@ add_arg = parser.add_argument add_arg('--content', default=None, type=str, help='Content image path as optimization target.') -add_arg('--content-weight', default=2500.0, type=float, help='Weight of content relative to style.') +add_arg('--content-weight', default=2500.0, type=float, help='Weight of content relative to style.') add_arg('--content-layers', default='4_1', type=str, help='The layer with which to match content.') add_arg('--style', default=None, type=str, help='Style image path to extract patches.') -add_arg('--style-weight', default=2500.0, type=float, help='Weight of style relative to content.') +add_arg('--style-weight', default=2500.0, type=float, help='Weight of style relative to content.') add_arg('--style-layers', default='4_1', type=str, help='The layers to match style patches.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=10.0, type=float, help='Global weight of semantics vs. features.') add_arg('--output', default='output.png', type=str, help='Output image path to save once done.') add_arg('--output-size', default=None, type=str, help='Size of the output image, e.g. 512x512.') -add_arg('--phases', default=3, type=int, help='Number of image scales to process in phases.') +add_arg('--phases', default=2, type=int, help='Number of image scales to process in phases.') add_arg('--slices', default=2, type=int, help='Split patches up into this number of batches.') add_arg('--cache', default=0, type=int, help='Whether to compute matches only once.') add_arg('--smoothness', default=0E+0, type=float, help='Weight of image smoothing scheme.') add_arg('--variety', default=0.0, type=float, help='Bias toward selecting diverse patches, e.g. 
0.5.') add_arg('--seed', default='content', type=str, help='Seed image path, "noise" or "content".') add_arg('--seed-range', default='16:240', type=str, help='Random colors chosen in range, e.g. 0:255.') -add_arg('--iterations', default=100, type=int, help='Number of iterations to run each resolution.') +add_arg('--iterations', default=2, type=int, help='Number of iterations to run each resolution.') add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') -add_arg('--print-every', default=1, type=int, help='How often to log statistics to stdout.') -add_arg('--save-every', default=1, type=int, help='How frequently to save PNG into `frames`.') +add_arg('--print-every', default=1, type=int, help='How often to log statistics to stdout.') +add_arg('--save-every', default=1, type=int, help='How frequently to save PNG into `frames`.') args = parser.parse_args() From 94004e29d436b8adfde501adb14575710c4acd96 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Tue, 10 May 2016 19:09:22 +0200 Subject: [PATCH 06/58] Switching architectures and newly trained network, preparing integration of weighting. --- doodle.py | 45 ++++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/doodle.py b/doodle.py index 36ee974..0b56d42 100755 --- a/doodle.py +++ b/doodle.py @@ -122,25 +122,27 @@ def DecvLayer(copy, input, channels, **args): custom = {'nonlinearity': lasagne.nonlinearities.elu} # Encoder part of the neural network, takes an input image and turns it into abstract patterns. 
net['img'] = input or InputLayer((1, 3, None, None)) - net['enc1_1'] = ConvLayer(net['img'], 48, 3, pad=1, **custom) - net['enc1_2'] = ConvLayer(net['enc1_1'], 48, 3, pad=1, **custom) - net['enc2_1'] = ConvLayer(net['enc1_2'], 96, 2, pad=0, stride=(2,2), **custom) - net['enc2_2'] = ConvLayer(net['enc2_1'], 96, 3, pad=1, **custom) - net['enc3_1'] = ConvLayer(net['enc2_2'], 192, 2, pad=0, stride=(2,2), **custom) - net['enc3_2'] = ConvLayer(net['enc3_1'], 192, 3, pad=1, **custom) - net['enc3_3'] = ConvLayer(net['enc3_2'], 192, 3, pad=1, **custom) - net['enc4_1'] = ConvLayer(net['enc3_3'], 384, 2, pad=0, stride=(2,2), **custom) + net['enc1_1'] = ConvLayer(net['img'], 32, 3, pad=1, **custom) + net['enc1_2'] = ConvLayer(net['enc1_1'], 32, 3, pad=1, **custom) + net['enc2_1'] = ConvLayer(net['enc1_2'], 64, 2, pad=0, stride=(2,2), **custom) + net['enc2_2'] = ConvLayer(net['enc2_1'], 64, 3, pad=1, **custom) + net['enc3_1'] = ConvLayer(net['enc2_2'], 128, 2, pad=0, stride=(2,2), **custom) + net['enc3_2'] = ConvLayer(net['enc3_1'], 128, 3, pad=1, **custom) + net['enc3_3'] = ConvLayer(net['enc3_2'], 128, 3, pad=1, **custom) + net['enc3_4'] = ConvLayer(net['enc3_3'], 128, 3, pad=1, **custom) + net['enc4_1'] = ConvLayer(net['enc3_4'], 256, 2, pad=0, stride=(2,2), **custom) # Decoder part of the neural network, takes abstract patterns and converts them into an image! 
self.tensor_middle = T.tensor4() - net['mid'] = InputLayer((1, 384, None, None), var=self.tensor_middle) - net['dec4_1'] = DecvLayer(net['enc4_1'], net['mid'], 192) - net['dec3_3'] = DecvLayer(net['enc3_3'], net['dec4_1'], 192) - net['dec3_2'] = DecvLayer(net['enc3_2'], net['dec3_3'], 192) - net['dec3_1'] = DecvLayer(net['enc3_1'], net['dec3_2'], 96) - net['dec2_2'] = DecvLayer(net['enc2_2'], net['dec3_1'], 96) - net['dec2_1'] = DecvLayer(net['enc2_1'], net['dec2_2'], 48) - net['dec1_2'] = DecvLayer(net['enc1_2'], net['dec2_1'], 48) + net['mid'] = InputLayer((1, 128, None, None), var=self.tensor_middle) + net['dec4_1'] = DecvLayer(net['enc4_1'], net['mid'], 128) + net['dec3_4'] = DecvLayer(net['enc3_4'], net['dec4_1'], 128) + net['dec3_3'] = DecvLayer(net['enc3_3'], net['dec3_4'], 128) + net['dec3_2'] = DecvLayer(net['enc3_2'], net['dec3_3'], 128) + net['dec3_1'] = DecvLayer(net['enc3_1'], net['dec3_2'], 64) + net['dec2_2'] = DecvLayer(net['enc2_2'], net['dec3_1'], 64) + net['dec2_1'] = DecvLayer(net['enc2_1'], net['dec2_2'], 32) + net['dec1_2'] = DecvLayer(net['enc1_2'], net['dec2_1'], 32) net['dec1_1'] = DecvLayer(net['enc1_1'], net['dec1_2'], 3, nonlinearity=lasagne.nonlinearities.tanh) net['dec0_0'] = lasagne.layers.ScaleLayer(net['dec1_1']) net['out'] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) @@ -168,10 +170,10 @@ def DecvLayer(copy, input, channels, **args): def load_data(self): """Open the serialized parameters from a pre-trained network, and load them into the model created. """ - data_file = os.path.join(os.path.dirname(__file__), 'bgg_conv.pkl.bz2') + data_file = os.path.join(os.path.dirname(__file__), 'gelu2_conv.pkl.bz2') if not os.path.exists(data_file): error("Model file with pre-trained convolution layers not found. 
Download here...", - "https://github.com/alexjc/neural-doodle/releases/download/v0.0/vgg19_conv.pkl.bz2") + "https://github.com/alexjc/neural-doodle/releases/download/v0.0/") data = pickle.load(bz2.open(data_file, 'rb')) for layer, values in data.items(): @@ -420,7 +422,7 @@ def do_match_patches(self, layer): # Compute the score of each patch, taking into account statistics from previous iteration. This equalizes # the chances of the patches being selected when the user requests more variety. offset = self.matcher_history[layer].reshape((-1, 1)) - scores = (dist - offset * args.variety) + scores = dist - offset * args.variety # Pick the best style patches for each patch in the current image, the result is an array of indices. # Also return the maximum value along both axis, used to compare slices and add patch variety. return [scores.argmax(axis=0), scores.max(axis=0), dist.max(axis=1)] @@ -533,7 +535,7 @@ def evaluate(self, Xn): self.matcher_tensors[l].set_value(f) # Compute best matching patches this style layer, going through all slices. - warmup = bool(args.variety > 0.0) + warmup = bool(self.iteration == 0 and args.variety > 0.0) for _ in range(2 if warmup else 1): best_idx = self.evaluate_slices(f, l) @@ -547,7 +549,8 @@ def evaluate(self, Xn): better_shape = current_features[-1].shape[2:] + (channels,) better_features = reconstruct_from_patches_2d(better_patches, better_shape) - f = better_features.transpose((2, 0, 1))[np.newaxis] # current_features[0][:,:channels] + f = better_features.transpose((2, 0, 1))[np.newaxis] + # f = current_features[0][:,:channels] output = self.compute_output(f) if np.isnan(output).any(): From abaab87f3b70960acde6763ec7a132a24271076b Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Tue, 10 May 2016 20:55:18 +0200 Subject: [PATCH 07/58] Prototype incrementally propagating data down decoder to the output. 
--- doodle.py | 52 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 30 insertions(+), 22 deletions(-) diff --git a/doodle.py b/doodle.py index 0b56d42..f8918ca 100755 --- a/doodle.py +++ b/doodle.py @@ -29,7 +29,7 @@ add_arg('--content-layers', default='4_1', type=str, help='The layer with which to match content.') add_arg('--style', default=None, type=str, help='Style image path to extract patches.') add_arg('--style-weight', default=2500.0, type=float, help='Weight of style relative to content.') -add_arg('--style-layers', default='4_1', type=str, help='The layers to match style patches.') +add_arg('--style-layers', default='4_1,3_1', type=str, help='The layers to match style patches.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=10.0, type=float, help='Global weight of semantics vs. features.') add_arg('--output', default='output.png', type=str, help='Output image path to save once done.') @@ -80,6 +80,7 @@ def error(message, *lines): import numpy as np import scipy.optimize, scipy.ndimage, scipy.misc import PIL +from sklearn.feature_extraction.image import reconstruct_from_patches_2d # Numeric Computing (GPU) import theano @@ -133,19 +134,22 @@ def DecvLayer(copy, input, channels, **args): net['enc4_1'] = ConvLayer(net['enc3_4'], 256, 2, pad=0, stride=(2,2), **custom) # Decoder part of the neural network, takes abstract patterns and converts them into an image! 
- self.tensor_middle = T.tensor4() - net['mid'] = InputLayer((1, 128, None, None), var=self.tensor_middle) - net['dec4_1'] = DecvLayer(net['enc4_1'], net['mid'], 128) + self.tensor_latent = [T.tensor4(), T.tensor4()] + net['lat4_1'] = InputLayer((1, 256, None, None), var=self.tensor_latent[0]) + net['dec4_1'] = DecvLayer(net['enc4_1'], net['lat4_1'], 128) net['dec3_4'] = DecvLayer(net['enc3_4'], net['dec4_1'], 128) net['dec3_3'] = DecvLayer(net['enc3_3'], net['dec3_4'], 128) net['dec3_2'] = DecvLayer(net['enc3_2'], net['dec3_3'], 128) - net['dec3_1'] = DecvLayer(net['enc3_1'], net['dec3_2'], 64) + net['out4_1'] = net['dec3_2'] + + net['lat3_1'] = InputLayer((1, 128, None, None), var=self.tensor_latent[1]) + net['dec3_1'] = DecvLayer(net['enc3_1'], net['lat3_1'], 64) net['dec2_2'] = DecvLayer(net['enc2_2'], net['dec3_1'], 64) net['dec2_1'] = DecvLayer(net['enc2_1'], net['dec2_2'], 32) net['dec1_2'] = DecvLayer(net['enc1_2'], net['dec2_1'], 32) net['dec1_1'] = DecvLayer(net['enc1_1'], net['dec1_2'], 3, nonlinearity=lasagne.nonlinearities.tanh) net['dec0_0'] = lasagne.layers.ScaleLayer(net['dec1_1']) - net['out'] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) + net['out3_1'] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) # Auxiliary network for the semantic layers, and the nearest neighbors calculations. 
net['map'] = InputLayer((1, 1, None, None)) @@ -391,8 +395,11 @@ def prepare_optimization(self): self.compute_matches = {l: self.compile([self.matcher_history[l]], self.do_match_patches(l))\ for l in self.style_layers} - output = lasagne.layers.get_output(self.model.network['out'], {self.model.network['mid']: self.model.tensor_middle}) - self.compute_output = self.compile([self.model.tensor_middle], output) + self.compute_output = [] + for layer, tensor in zip(self.style_layers, self.model.tensor_latent): + output = lasagne.layers.get_output(self.model.network['out'+layer], {self.model.network['lat'+layer]: tensor}) + fn = self.compile([tensor], output) + self.compute_output.append(fn) #------------------------------------------------------------------------------------------------------------------ @@ -528,9 +535,10 @@ def evaluate(self, Xn): current_features = self.compute_features(current_img, self.content_map) # Iterate through each of the style layers one by one, computing best matches. 
- current_best = [] - for l, ff in zip(self.style_layers, current_features): - f = np.copy(ff) + desired_features = current_features[0] + + for l, ff, compute in zip(self.style_layers, current_features, self.compute_output): + f = np.copy(desired_features) self.normalize_components(l, f, self.compute_norms(np, l, f)) self.matcher_tensors[l].set_value(f) @@ -540,19 +548,19 @@ def evaluate(self, Xn): best_idx = self.evaluate_slices(f, l) patches = self.style_data[l][0] - current_best.append(patches[best_idx].astype(np.float32)) - - from sklearn.feature_extraction.image import reconstruct_from_patches_2d + print('layer', l, 'patches', len(set(best_idx)), '/', best_idx.shape[0]) + current_best = patches[best_idx].astype(np.float32) - channels = self.model.channels[self.style_layers[-1]] - better_patches = current_best[-1][:,:channels].transpose((0, 2, 3, 1)) - better_shape = current_features[-1].shape[2:] + (channels,) - better_features = reconstruct_from_patches_2d(better_patches, better_shape) + channels = self.model.channels[l] + better_patches = current_best[:,:channels].transpose((0, 2, 3, 1)) + better_shape = f.shape[2:] + (channels,) + better_features = reconstruct_from_patches_2d(better_patches, better_shape) - f = better_features.transpose((2, 0, 1))[np.newaxis] - # f = current_features[0][:,:channels] - output = self.compute_output(f) + f = better_features.transpose((2, 0, 1))[np.newaxis] + # f = current_features[0][:,:channels] + desired_features = compute(f) + output = desired_features if np.isnan(output).any(): raise OverflowError("Optimization diverged; try using a different device or parameters.") @@ -596,7 +604,7 @@ def run(self): self.prepare_style(scale) # Now setup the model with the new data, ready for the optimization loop. 
- self.model.setup(layers=['out'] + ['sem'+l for l in self.style_layers] + ['enc'+l for l in self.used_layers]) + self.model.setup(layers=['out4_1', 'out3_1'] + ['sem'+l for l in self.style_layers] + ['enc'+l for l in self.used_layers]) self.prepare_optimization() print('{}'.format(ansi.ENDC)) From bead8a3fee61e4e326485d7a4cf2d4816f438c32 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Wed, 11 May 2016 14:06:46 +0200 Subject: [PATCH 08/58] Layerwise decoding of the encoded + matched features, work in progress. --- doodle.py | 173 ++++++++++++++++++++++++++++++------------------------ 1 file changed, 95 insertions(+), 78 deletions(-) diff --git a/doodle.py b/doodle.py index f8918ca..01e4fa7 100755 --- a/doodle.py +++ b/doodle.py @@ -24,12 +24,12 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter) add_arg = parser.add_argument -add_arg('--content', default=None, type=str, help='Content image path as optimization target.') -add_arg('--content-weight', default=2500.0, type=float, help='Weight of content relative to style.') -add_arg('--content-layers', default='4_1', type=str, help='The layer with which to match content.') -add_arg('--style', default=None, type=str, help='Style image path to extract patches.') -add_arg('--style-weight', default=2500.0, type=float, help='Weight of style relative to content.') -add_arg('--style-layers', default='4_1,3_1', type=str, help='The layers to match style patches.') +add_arg('--content', default=None, type=str, help='Subject image path to repaint in new style.') +add_arg('--style', default=None, type=str, help='Texture image path to extract patches from.') +add_arg('--balance', default=0.8, type=float, help='Weight of content relative to style.') +add_arg('--variety', default=0.0, type=float, help='Bias toward selecting diverse patches, e.g. 
0.5.') +add_arg('--layers', default='4_1', type=str, help='The layer with which to match content.') +add_arg('--shapes', default='3,2', type=str, help='Size of kernels used for patch extraction.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=10.0, type=float, help='Global weight of semantics vs. features.') add_arg('--output', default='output.png', type=str, help='Output image path to save once done.') @@ -37,8 +37,6 @@ add_arg('--phases', default=2, type=int, help='Number of image scales to process in phases.') add_arg('--slices', default=2, type=int, help='Split patches up into this number of batches.') add_arg('--cache', default=0, type=int, help='Whether to compute matches only once.') -add_arg('--smoothness', default=0E+0, type=float, help='Weight of image smoothing scheme.') -add_arg('--variety', default=0.0, type=float, help='Bias toward selecting diverse patches, e.g. 0.5.') add_arg('--seed', default='content', type=str, help='Seed image path, "noise" or "content".') add_arg('--seed-range', default='16:240', type=str, help='Random colors chosen in range, e.g. 0:255.') add_arg('--iterations', default=2, type=int, help='Number of iterations to run each resolution.') @@ -55,7 +53,7 @@ class ansi: BOLD = '\033[1;97m' WHITE = '\033[0;97m' YELLOW = '\033[0;33m' - YELLOW_B = '\033[0;33m' + YELLOW_B = '\033[1;33m' RED = '\033[0;31m' RED_B = '\033[1;31m' BLUE = '\033[0;94m' @@ -69,6 +67,10 @@ def error(message, *lines): print(string.format(ansi.RED_B, ansi.RED, ansi.ENDC)) sys.exit(-1) +print("""{}NOTICE: This is R&D in progress. Terms and Conditions:{} + - Trained model are for non-commercial use, no redistribution. 
+ - For derived/inspired research, please cite this project.\n{}""".format(ansi.YELLOW_B, ansi.YELLOW, ansi.ENDC)) + print('{}Neural Doodle for semantic style transfer.{}'.format(ansi.CYAN_B, ansi.ENDC)) # Load the underlying deep learning libraries based on the device specified. If you specify THEANO_FLAGS manually, @@ -106,23 +108,40 @@ class Model(object): """Store all the data related to the neural network (aka. "model"). This is currently based on VGG19. """ - def __init__(self): - self.setup_model() + def __init__(self, layers): + self.setup_model(layers) self.load_data() - def setup_model(self, input=None): + def setup_model(self, layers, previous=None): """Use lasagne to create a network of convolution layers, first using VGG19 as the framework and then adding augmentations for Semantic Style Transfer. """ net, self.channels = {}, {} - def DecvLayer(copy, input, channels, **args): - return DeconvLayer(input, channels, copy.filter_size, stride=copy.stride, crop=copy.pad, - nonlinearity=args.get('nonlinearity', lasagne.nonlinearities.elu)) + net['map'] = InputLayer((1, 1, None, None)) + for j in range(4): + net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad') + + + def DecvLayer(copy, previous, channels, **params): + # Dynamically injects intermediate pitstop layers in the encoder based on what the user + # specified as layers. It's rather inelegant... Needs a rework! 
+ if copy in layers: + if len(self.tensor_latent) > 0: + l = self.tensor_latent[-1][0] + net['out'+l] = ConcatLayer([previous, net['map%i'%(int(l[0])-1)]]) + + self.tensor_latent.append((copy, T.tensor4())) + net['lat'+copy] = InputLayer((1, previous.num_filters, None, None), var=self.tensor_latent[-1][1]) + previous = net['lat'+copy] + + dup = net['enc'+copy] + return DeconvLayer(previous, channels, dup.filter_size, stride=dup.stride, crop=dup.pad, + nonlinearity=params.get('nonlinearity', lasagne.nonlinearities.elu)) custom = {'nonlinearity': lasagne.nonlinearities.elu} # Encoder part of the neural network, takes an input image and turns it into abstract patterns. - net['img'] = input or InputLayer((1, 3, None, None)) + net['img'] = previous or InputLayer((1, 3, None, None)) net['enc1_1'] = ConvLayer(net['img'], 32, 3, pad=1, **custom) net['enc1_2'] = ConvLayer(net['enc1_1'], 32, 3, pad=1, **custom) net['enc2_1'] = ConvLayer(net['enc1_2'], 64, 2, pad=0, stride=(2,2), **custom) @@ -134,33 +153,27 @@ def DecvLayer(copy, input, channels, **args): net['enc4_1'] = ConvLayer(net['enc3_4'], 256, 2, pad=0, stride=(2,2), **custom) # Decoder part of the neural network, takes abstract patterns and converts them into an image! 
- self.tensor_latent = [T.tensor4(), T.tensor4()] - net['lat4_1'] = InputLayer((1, 256, None, None), var=self.tensor_latent[0]) - net['dec4_1'] = DecvLayer(net['enc4_1'], net['lat4_1'], 128) - net['dec3_4'] = DecvLayer(net['enc3_4'], net['dec4_1'], 128) - net['dec3_3'] = DecvLayer(net['enc3_3'], net['dec3_4'], 128) - net['dec3_2'] = DecvLayer(net['enc3_2'], net['dec3_3'], 128) - net['out4_1'] = net['dec3_2'] - - net['lat3_1'] = InputLayer((1, 128, None, None), var=self.tensor_latent[1]) - net['dec3_1'] = DecvLayer(net['enc3_1'], net['lat3_1'], 64) - net['dec2_2'] = DecvLayer(net['enc2_2'], net['dec3_1'], 64) - net['dec2_1'] = DecvLayer(net['enc2_1'], net['dec2_2'], 32) - net['dec1_2'] = DecvLayer(net['enc1_2'], net['dec2_1'], 32) - net['dec1_1'] = DecvLayer(net['enc1_1'], net['dec1_2'], 3, nonlinearity=lasagne.nonlinearities.tanh) + self.tensor_latent = [] + net['dec4_1'] = DecvLayer('4_1', net['enc4_1'], 128) + net['dec3_4'] = DecvLayer('3_4', net['dec4_1'], 128) + net['dec3_3'] = DecvLayer('3_3', net['dec3_4'], 128) + net['dec3_2'] = DecvLayer('3_2', net['dec3_3'], 128) + net['dec3_1'] = DecvLayer('3_1', net['dec3_2'], 64) + net['dec2_2'] = DecvLayer('2_2', net['dec3_1'], 64) + net['dec2_1'] = DecvLayer('2_1', net['dec2_2'], 32) + net['dec1_2'] = DecvLayer('1_2', net['dec2_1'], 32) + net['dec1_1'] = DecvLayer('1_1', net['dec1_2'], 3, nonlinearity=lasagne.nonlinearities.tanh) net['dec0_0'] = lasagne.layers.ScaleLayer(net['dec1_1']) - net['out3_1'] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) + + l = self.tensor_latent[-1][0] + net['out'+l] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) # Auxiliary network for the semantic layers, and the nearest neighbors calculations. 
- net['map'] = InputLayer((1, 1, None, None)) for j, i in itertools.product(range(4), range(3)): suffix = '%i_%i' % (j+1, i+1) if 'enc'+suffix not in net: continue - if i == 0: - net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad') - self.channels[suffix] = net['enc'+suffix].num_filters - + self.channels[suffix] = net['enc'+suffix].num_filters if args.semantic_weight > 0.0: net['sem'+suffix] = ConcatLayer([net['enc'+suffix], net['map%i'%(j+1)]]) else: @@ -177,12 +190,11 @@ def load_data(self): data_file = os.path.join(os.path.dirname(__file__), 'gelu2_conv.pkl.bz2') if not os.path.exists(data_file): error("Model file with pre-trained convolution layers not found. Download here...", - "https://github.com/alexjc/neural-doodle/releases/download/v0.0/") + "https://github.com/alexjc/neural-doodle/releases/download/v0.0/gelu2_conv.pkl.bz2") data = pickle.load(bz2.open(data_file, 'rb')) for layer, values in data.items(): - if layer not in self.network: continue - # assert layer in self.network, "Layer `{}` not found as expected.".format(layer) + assert layer in self.network, "Layer `{}` not found as expected.".format(layer) for p, v in zip(self.network[layer].get_params(), values): assert p.get_value().shape == v.shape, "Layer `{}` in network has size {} but data is {}."\ .format(layer, v.shape, p.get_value().shape) @@ -233,9 +245,7 @@ def __init__(self): """ self.start_time = time.time() self.style_cache = {} - self.style_layers = args.style_layers.split(',') - self.content_layers = args.content_layers.split(',') - self.used_layers = self.style_layers + self.content_layers + self.layers = args.layers.split(',') # Prepare file output and load files specified as input. if args.save_every is not None: @@ -290,7 +300,7 @@ def __init__(self): # Finalize the parameters based on what we loaded, then create the model. 
args.semantic_weight = math.sqrt(9.0 / args.semantic_weight) if args.semantic_weight else 0.0 - self.model = Model() + self.model = Model(self.layers) #------------------------------------------------------------------------------------------------------------------ @@ -325,7 +335,7 @@ def compute_norms(self, backend, layer, array): return [ni] + [ns] def normalize_components(self, layer, array, norms): - if args.style_weight > 0.0: + if args.balance > 0.0: array[:,:self.model.channels[layer]] /= (norms[0] * 3.0) if args.semantic_weight > 0.0: array[:,self.model.channels[layer]:] /= (norms[1] * args.semantic_weight) @@ -362,13 +372,13 @@ def prepare_style(self, scale=1.0): self.style_map = style_map.transpose((2, 0, 1))[np.newaxis].astype(np.float32) # Compile a function to run on the GPU to extract patches for all layers at once. - layer_outputs = zip(self.style_layers, self.model.get_outputs('sem', self.style_layers)) - extractor = self.compile([self.model.tensor_img, self.model.tensor_map], self.do_extract_patches(layer_outputs)) + layer_patches = self.do_extract_patches(self.layers, self.model.get_outputs('sem', self.layers), [3, 2]) + extractor = self.compile([self.model.tensor_img, self.model.tensor_map], layer_patches) result = extractor(self.style_img, self.style_map) # Store all the style patches layer by layer, resized to match slice size and cast to 16-bit for size. 
self.style_data = {} - for layer, *data in zip(self.style_layers, result[0::3], result[1::3], result[2::3]): + for layer, *data in zip(self.layers, result[0::3], result[1::3], result[2::3]): patches = data[0] l = self.model.network['nn'+layer] l.num_filters = patches.shape[0] // args.slices @@ -383,22 +393,24 @@ def prepare_optimization(self): # Feed-forward calculation only, returns the result of the convolution post-activation self.compute_features = self.compile([self.model.tensor_img, self.model.tensor_map], - self.model.get_outputs('sem', self.style_layers)) + self.model.get_outputs('sem', self.layers)) # Patch matching calculation that uses only pre-calculated features and a slice of the patches. - self.matcher_tensors = {l: lasagne.utils.shared_empty(dim=4) for l in self.style_layers} - self.matcher_history = {l: T.vector() for l in self.style_layers} - self.matcher_inputs = {self.model.network['dup'+l]: self.matcher_tensors[l] for l in self.style_layers} - nn_layers = [self.model.network['nn'+l] for l in self.style_layers] - self.matcher_outputs = dict(zip(self.style_layers, lasagne.layers.get_output(nn_layers, self.matcher_inputs))) + self.matcher_tensors = {l: lasagne.utils.shared_empty(dim=4) for l in self.layers} + self.matcher_history = {l: T.vector() for l in self.layers} + self.matcher_inputs = {self.model.network['dup'+l]: self.matcher_tensors[l] for l in self.layers} + nn_layers = [self.model.network['nn'+l] for l in self.layers] + self.matcher_outputs = dict(zip(self.layers, lasagne.layers.get_output(nn_layers, self.matcher_inputs))) self.compute_matches = {l: self.compile([self.matcher_history[l]], self.do_match_patches(l))\ - for l in self.style_layers} + for l in self.layers} self.compute_output = [] - for layer, tensor in zip(self.style_layers, self.model.tensor_latent): - output = lasagne.layers.get_output(self.model.network['out'+layer], {self.model.network['lat'+layer]: tensor}) - fn = self.compile([tensor], output) + for layer, (_, 
tensor_latent) in zip(self.layers, self.model.tensor_latent): + output = lasagne.layers.get_output(self.model.network['out'+layer], + {self.model.network['lat'+layer]: tensor_latent, + self.model.network['map']: self.model.tensor_map}) + fn = self.compile([tensor_latent, self.model.tensor_map], output) self.compute_output.append(fn) @@ -406,19 +418,19 @@ def prepare_optimization(self): # Theano Computation #------------------------------------------------------------------------------------------------------------------ - def do_extract_patches(self, layers, size=3, stride=1): + def do_extract_patches(self, layers, outputs, sizes, stride=1): """This function builds a Theano expression that will get compiled an run on the GPU. It extracts 3x3 patches from the intermediate outputs in the model. """ results = [] - for l, f in layers: + for layer, output, size in zip(layers, outputs, sizes): # Use a Theano helper function to extract "neighbors" of specific size, seems a bit slower than doing # it manually but much simpler! - patches = theano.tensor.nnet.neighbours.images2neibs(f, (size, size), (stride, stride), mode='valid') + patches = theano.tensor.nnet.neighbours.images2neibs(output, (size, size), (stride, stride), mode='valid') # Make sure the patches are in the shape required to insert them into the model as another layer. - patches = patches.reshape((-1, patches.shape[0] // f.shape[1], size, size)).dimshuffle((1, 0, 2, 3)) + patches = patches.reshape((-1, patches.shape[0] // output.shape[1], size, size)).dimshuffle((1, 0, 2, 3)) # Calculate the magnitude that we'll use for normalization at runtime, then store... - results.extend([patches] + self.compute_norms(T, l, patches)) + results.extend([patches] + self.compute_norms(T, layer, patches)) return results def do_match_patches(self, layer): @@ -449,11 +461,11 @@ def content_loss(self): return content_loss # First extract all the features we need from the model, these results after convolution. 
- extractor = theano.function([self.model.tensor_img], self.model.get_outputs('enc', self.content_layers)) + extractor = theano.function([self.model.tensor_img], self.model.get_outputs('enc', self.layers)) result = extractor(self.content_img) # Build a list of loss components that compute the mean squared error by comparing current result to desired. - for l, ref in zip(self.content_layers, result): + for l, ref in zip(self.layers, result): layer = self.model.tensor_outputs['enc'+l] loss = T.mean((layer - ref) ** 2.0) content_loss.append(('content', l, args.content_weight * loss)) @@ -469,10 +481,10 @@ def style_loss(self): return style_loss # Extract the patches from the current image, as well as their magnitude. - result = self.do_extract_patches(zip(self.style_layers, self.model.get_outputs('enc', self.style_layers))) + result = self.do_extract_patches(self.layers, self.model.get_outputs('enc', self.layers), [3, 2]) # Multiple style layers are optimized separately, usually conv3_1 and conv4_1 — semantic data not used here. - for l, matches, patches in zip(self.style_layers, self.tensor_matches, result[0::3]): + for l, matches, patches in zip(self.layers, self.tensor_matches, result[0::3]): # Compute the mean squared error between the current patch and the best matching style patch. # Ignore the last channels (from semantic map) so errors returned are indicative of image only. loss = T.mean((patches - matches[:,:self.model.channels[l]]) ** 2.0) @@ -525,11 +537,15 @@ def evaluate_slices(self, f, l): if args.cache: self.style_cache[l] = best_idx - return best_idx + return best_idx, best_val def evaluate(self, Xn): """Callback for the L-BFGS optimization that computes the loss and gradients on the GPU. """ + + if args.print_every and self.frame % args.print_every == 0: + print('{:>3} {}layer{}'.format(self.frame, ansi.BOLD, ansi.ENDC), end='', flush=True) + # Adjust the representation to be compatible with the model before computing results. 
current_img = Xn.reshape(self.content_img.shape).astype(np.float32) / 127.5 - 1.0 current_features = self.compute_features(current_img, self.content_map) @@ -537,18 +553,21 @@ def evaluate(self, Xn): # Iterate through each of the style layers one by one, computing best matches. desired_features = current_features[0] - for l, ff, compute in zip(self.style_layers, current_features, self.compute_output): - f = np.copy(desired_features) + for l, ff, compute in zip(self.layers, current_features, self.compute_output): + f = np.copy(ff) self.normalize_components(l, f, self.compute_norms(np, l, f)) self.matcher_tensors[l].set_value(f) # Compute best matching patches this style layer, going through all slices. warmup = bool(self.iteration == 0 and args.variety > 0.0) for _ in range(2 if warmup else 1): - best_idx = self.evaluate_slices(f, l) + best_idx, best_val = self.evaluate_slices(f, l) patches = self.style_data[l][0] - print('layer', l, 'patches', len(set(best_idx)), '/', best_idx.shape[0]) + using = 100.0 * len(set(best_idx)) / best_idx.shape[0] + dupes = 100.0 * len([v for v in collections.Counter(best_idx).values() if v>1]) / best_idx.shape[0] + self.error = best_val.mean() + print(' {}{}{} patches {:2.0f}% dupes {:2.0f}% '.format(ansi.BOLD, l, ansi.ENDC, using, dupes), end='', flush=True) current_best = patches[best_idx].astype(np.float32) channels = self.model.channels[l] @@ -558,7 +577,7 @@ def evaluate(self, Xn): f = better_features.transpose((2, 0, 1))[np.newaxis] # f = current_features[0][:,:channels] - desired_features = compute(f) + desired_features = compute(f, self.content_map) output = desired_features if np.isnan(output).any(): @@ -573,10 +592,7 @@ def evaluate(self, Xn): # Print more information to the console every few iterations. 
if args.print_every and self.frame % args.print_every == 0: - print('{:>3} '.format(self.frame, ansi.BOLD, ansi.ENDC), end='') - current_time = time.time() - quality = 100.0 - 100.0 * np.sqrt(self.error / 255.0) print(' {}time{} {:3.1f}s '.format(ansi.BOLD, ansi.ENDC, current_time - self.iter_time), flush=True) self.iter_time = current_time @@ -599,12 +615,13 @@ def run(self): .format(ansi.BLUE_B, i, int(shape[1]*scale), int(shape[0]*scale), scale, ansi.BLUE)) # Precompute all necessary data for the various layers, put patches in place into augmented network. - self.model.setup(layers=['sem'+l for l in self.style_layers] + ['enc'+l for l in self.content_layers]) + self.model.setup(layers=['sem'+l for l in self.layers] + ['enc'+l for l in self.layers]) self.prepare_content(scale) self.prepare_style(scale) # Now setup the model with the new data, ready for the optimization loop. - self.model.setup(layers=['out4_1', 'out3_1'] + ['sem'+l for l in self.style_layers] + ['enc'+l for l in self.used_layers]) + # TODO: , 'out3_1' + self.model.setup(layers=['out4_1'] + ['sem'+l for l in self.layers] + ['enc'+l for l in self.layers]) self.prepare_optimization() print('{}'.format(ansi.ENDC)) @@ -644,7 +661,7 @@ def run(self): interrupt = False status = "finished in" if not interrupt else "interrupted at" - print('\n{}Optimization {} {:3.1f}s, average pixel error {:3.1f}!{}\n'\ + print('\n{}Optimization {} {:3.1f}s, average patch error {:3.1f}!{}\n'\ .format(ansi.CYAN, status, time.time() - self.start_time, self.error, ansi.ENDC)) From d77ed33d4ca77e7c70a1d7ac0f1ff1387612f045 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Wed, 11 May 2016 16:02:11 +0200 Subject: [PATCH 09/58] New balance parameter replaces both weights. Balance at 0.0 means full content, 1.0 means full style. 
--- doodle.py | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/doodle.py b/doodle.py index 01e4fa7..99d5be1 100755 --- a/doodle.py +++ b/doodle.py @@ -26,12 +26,12 @@ add_arg('--content', default=None, type=str, help='Subject image path to repaint in new style.') add_arg('--style', default=None, type=str, help='Texture image path to extract patches from.') -add_arg('--balance', default=0.8, type=float, help='Weight of content relative to style.') +add_arg('--balance', default=1.0, type=float, help='Weight of style relative to content.') add_arg('--variety', default=0.0, type=float, help='Bias toward selecting diverse patches, e.g. 0.5.') add_arg('--layers', default='4_1', type=str, help='The layer with which to match content.') add_arg('--shapes', default='3,2', type=str, help='Size of kernels used for patch extraction.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') -add_arg('--semantic-weight', default=10.0, type=float, help='Global weight of semantics vs. features.') +add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. features.') add_arg('--output', default='output.png', type=str, help='Output image path to save once done.') add_arg('--output-size', default=None, type=str, help='Size of the output image, e.g. 512x512.') add_arg('--phases', default=2, type=int, help='Number of image scales to process in phases.') @@ -39,7 +39,7 @@ add_arg('--cache', default=0, type=int, help='Whether to compute matches only once.') add_arg('--seed', default='content', type=str, help='Seed image path, "noise" or "content".') add_arg('--seed-range', default='16:240', type=str, help='Random colors chosen in range, e.g. 
0:255.') -add_arg('--iterations', default=2, type=int, help='Number of iterations to run each resolution.') +add_arg('--iterations', default=3, type=int, help='Number of iterations to run each resolution.') add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') add_arg('--print-every', default=1, type=int, help='How often to log statistics to stdout.') add_arg('--save-every', default=1, type=int, help='How frequently to save PNG into `frames`.') @@ -540,7 +540,7 @@ def evaluate_slices(self, f, l): return best_idx, best_val def evaluate(self, Xn): - """Callback for the L-BFGS optimization that computes the loss and gradients on the GPU. + """Feed-forward evaluation of the output based on current image. Can be called multiple times. """ if args.print_every and self.frame % args.print_every == 0: @@ -551,10 +551,10 @@ def evaluate(self, Xn): current_features = self.compute_features(current_img, self.content_map) # Iterate through each of the style layers one by one, computing best matches. 
- desired_features = current_features[0] + desired_feature = current_features[0] - for l, ff, compute in zip(self.layers, current_features, self.compute_output): - f = np.copy(ff) + for l, current_feature, compute in zip(self.layers, current_features, self.compute_output): + f = np.copy(desired_feature) self.normalize_components(l, f, self.compute_norms(np, l, f)) self.matcher_tensors[l].set_value(f) @@ -575,13 +575,12 @@ def evaluate(self, Xn): better_shape = f.shape[2:] + (channels,) better_features = reconstruct_from_patches_2d(better_patches, better_shape) - f = better_features.transpose((2, 0, 1))[np.newaxis] - # f = current_features[0][:,:channels] - desired_features = compute(f, self.content_map) + f = (1.0 - args.balance) * current_feature[:,:channels]\ + + (0.0 + args.balance) * better_features.transpose((2, 0, 1))[np.newaxis] + desired_feature = compute(f, self.content_map) - output = desired_features - if np.isnan(output).any(): - raise OverflowError("Optimization diverged; try using a different device or parameters.") + if np.isnan(desired_feature).any(): + raise OverflowError("Optimization diverged; try using a different device or parameters.") # Dump the image to disk if requested by the user. if args.save_every and self.frame % args.save_every == 0: @@ -600,7 +599,7 @@ def evaluate(self, Xn): self.frame += 1 self.iteration += 1 - return output + return desired_feature def run(self): """The main entry point for the application, runs through multiple phases at increasing resolutions. 
From 703155506e34a73e5a2a6338de1e79bda8551243 Mon Sep 17 00:00:00 2001 From: longears Date: Wed, 11 May 2016 23:10:08 -0700 Subject: [PATCH 10/58] Fix crash when a directory named "content" exists (#88) --- doodle.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doodle.py b/doodle.py index 99d5be1..c092c19 100755 --- a/doodle.py +++ b/doodle.py @@ -628,13 +628,13 @@ def run(self): shape = self.content_img.shape[2:] if args.seed == 'content': Xn = (self.content_img[0] + 1.0) * 127.5 - if args.seed == 'noise': + elif args.seed == 'noise': bounds = [int(i) for i in args.seed_range.split(':')] Xn = np.random.uniform(bounds[0], bounds[1], shape + (3,)).astype(np.float32) - if args.seed == 'previous': + elif args.seed == 'previous': Xn = scipy.misc.imresize(Xn[0], shape, interp='bicubic') Xn = Xn.transpose((2, 0, 1))[np.newaxis] - if os.path.exists(args.seed): + elif os.path.exists(args.seed): seed_image = scipy.ndimage.imread(args.seed, mode='RGB') seed_image = scipy.misc.imresize(seed_image, shape, interp='bicubic') self.seed_image = self.model.prepare_image(seed_image) From 419d752675874a92324e0a0ef731dfaafb292058 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Thu, 12 May 2016 09:59:45 +0200 Subject: [PATCH 11/58] Cleaning up multi-parameter implementation, supporting patch shapes. --- doodle.py | 60 +++++++++++++++++++++++++++---------------------------- 1 file changed, 29 insertions(+), 31 deletions(-) diff --git a/doodle.py b/doodle.py index c092c19..8a88392 100755 --- a/doodle.py +++ b/doodle.py @@ -28,11 +28,11 @@ add_arg('--style', default=None, type=str, help='Texture image path to extract patches from.') add_arg('--balance', default=1.0, type=float, help='Weight of style relative to content.') add_arg('--variety', default=0.0, type=float, help='Bias toward selecting diverse patches, e.g. 
0.5.') -add_arg('--layers', default='4_1', type=str, help='The layer with which to match content.') -add_arg('--shapes', default='3,2', type=str, help='Size of kernels used for patch extraction.') +add_arg('--layers', default=['4_1'], nargs='+', type=str, help='The layer with which to match content.') +add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') -add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. features.') -add_arg('--output', default='output.png', type=str, help='Output image path to save once done.') +add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. features.') +add_arg('--output', default='output.png', type=str, help='Output image path to save once done.') add_arg('--output-size', default=None, type=str, help='Size of the output image, e.g. 512x512.') add_arg('--phases', default=2, type=int, help='Number of image scales to process in phases.') add_arg('--slices', default=2, type=int, help='Split patches up into this number of batches.') @@ -68,7 +68,7 @@ def error(message, *lines): sys.exit(-1) print("""{}NOTICE: This is R&D in progress. Terms and Conditions:{} - - Trained model are for non-commercial use, no redistribution. + - Trained models are for non-commercial use, no redistribution. - For derived/inspired research, please cite this project.\n{}""".format(ansi.YELLOW_B, ansi.YELLOW, ansi.ENDC)) print('{}Neural Doodle for semantic style transfer.{}'.format(ansi.CYAN_B, ansi.ENDC)) @@ -108,11 +108,11 @@ class Model(object): """Store all the data related to the neural network (aka. "model"). This is currently based on VGG19. 
""" - def __init__(self, layers): - self.setup_model(layers) + def __init__(self): + self.setup_model() self.load_data() - def setup_model(self, layers, previous=None): + def setup_model(self, previous=None): """Use lasagne to create a network of convolution layers, first using VGG19 as the framework and then adding augmentations for Semantic Style Transfer. """ @@ -126,7 +126,7 @@ def setup_model(self, layers, previous=None): def DecvLayer(copy, previous, channels, **params): # Dynamically injects intermediate pitstop layers in the encoder based on what the user # specified as layers. It's rather inelegant... Needs a rework! - if copy in layers: + if copy in args.layers: if len(self.tensor_latent) > 0: l = self.tensor_latent[-1][0] net['out'+l] = ConcatLayer([previous, net['map%i'%(int(l[0])-1)]]) @@ -245,7 +245,6 @@ def __init__(self): """ self.start_time = time.time() self.style_cache = {} - self.layers = args.layers.split(',') # Prepare file output and load files specified as input. if args.save_every is not None: @@ -300,7 +299,7 @@ def __init__(self): # Finalize the parameters based on what we loaded, then create the model. args.semantic_weight = math.sqrt(9.0 / args.semantic_weight) if args.semantic_weight else 0.0 - self.model = Model(self.layers) + self.model = Model() #------------------------------------------------------------------------------------------------------------------ @@ -372,13 +371,13 @@ def prepare_style(self, scale=1.0): self.style_map = style_map.transpose((2, 0, 1))[np.newaxis].astype(np.float32) # Compile a function to run on the GPU to extract patches for all layers at once. 
- layer_patches = self.do_extract_patches(self.layers, self.model.get_outputs('sem', self.layers), [3, 2]) + layer_patches = self.do_extract_patches(args.layers, self.model.get_outputs('sem', args.layers), args.shapes) extractor = self.compile([self.model.tensor_img, self.model.tensor_map], layer_patches) result = extractor(self.style_img, self.style_map) # Store all the style patches layer by layer, resized to match slice size and cast to 16-bit for size. self.style_data = {} - for layer, *data in zip(self.layers, result[0::3], result[1::3], result[2::3]): + for layer, *data in zip(args.layers, result[0::3], result[1::3], result[2::3]): patches = data[0] l = self.model.network['nn'+layer] l.num_filters = patches.shape[0] // args.slices @@ -393,20 +392,20 @@ def prepare_optimization(self): # Feed-forward calculation only, returns the result of the convolution post-activation self.compute_features = self.compile([self.model.tensor_img, self.model.tensor_map], - self.model.get_outputs('sem', self.layers)) + self.model.get_outputs('sem', args.layers)) # Patch matching calculation that uses only pre-calculated features and a slice of the patches. 
- self.matcher_tensors = {l: lasagne.utils.shared_empty(dim=4) for l in self.layers} - self.matcher_history = {l: T.vector() for l in self.layers} - self.matcher_inputs = {self.model.network['dup'+l]: self.matcher_tensors[l] for l in self.layers} - nn_layers = [self.model.network['nn'+l] for l in self.layers] - self.matcher_outputs = dict(zip(self.layers, lasagne.layers.get_output(nn_layers, self.matcher_inputs))) + self.matcher_tensors = {l: lasagne.utils.shared_empty(dim=4) for l in args.layers} + self.matcher_history = {l: T.vector() for l in args.layers} + self.matcher_inputs = {self.model.network['dup'+l]: self.matcher_tensors[l] for l in args.layers} + nn_layers = [self.model.network['nn'+l] for l in args.layers] + self.matcher_outputs = dict(zip(args.layers, lasagne.layers.get_output(nn_layers, self.matcher_inputs))) self.compute_matches = {l: self.compile([self.matcher_history[l]], self.do_match_patches(l))\ - for l in self.layers} + for l in args.layers} self.compute_output = [] - for layer, (_, tensor_latent) in zip(self.layers, self.model.tensor_latent): + for layer, (_, tensor_latent) in zip(args.layers, self.model.tensor_latent): output = lasagne.layers.get_output(self.model.network['out'+layer], {self.model.network['lat'+layer]: tensor_latent, self.model.network['map']: self.model.tensor_map}) @@ -461,11 +460,11 @@ def content_loss(self): return content_loss # First extract all the features we need from the model, these results after convolution. - extractor = theano.function([self.model.tensor_img], self.model.get_outputs('enc', self.layers)) + extractor = theano.function([self.model.tensor_img], self.model.get_outputs('enc', args.layers)) result = extractor(self.content_img) # Build a list of loss components that compute the mean squared error by comparing current result to desired. 
- for l, ref in zip(self.layers, result): + for l, ref in zip(args.layers, result): layer = self.model.tensor_outputs['enc'+l] loss = T.mean((layer - ref) ** 2.0) content_loss.append(('content', l, args.content_weight * loss)) @@ -481,10 +480,10 @@ def style_loss(self): return style_loss # Extract the patches from the current image, as well as their magnitude. - result = self.do_extract_patches(self.layers, self.model.get_outputs('enc', self.layers), [3, 2]) + result = self.do_extract_patches(args.layers, self.model.get_outputs('enc', args.layers), args.shapes) # Multiple style layers are optimized separately, usually conv3_1 and conv4_1 — semantic data not used here. - for l, matches, patches in zip(self.layers, self.tensor_matches, result[0::3]): + for l, matches, patches in zip(args.layers, self.tensor_matches, result[0::3]): # Compute the mean squared error between the current patch and the best matching style patch. # Ignore the last channels (from semantic map) so errors returned are indicative of image only. loss = T.mean((patches - matches[:,:self.model.channels[l]]) ** 2.0) @@ -553,7 +552,7 @@ def evaluate(self, Xn): # Iterate through each of the style layers one by one, computing best matches. 
desired_feature = current_features[0] - for l, current_feature, compute in zip(self.layers, current_features, self.compute_output): + for l, balance, current_feature, compute in zip(args.layers, args.balance, current_features, self.compute_output): f = np.copy(desired_feature) self.normalize_components(l, f, self.compute_norms(np, l, f)) self.matcher_tensors[l].set_value(f) @@ -575,8 +574,8 @@ def evaluate(self, Xn): better_shape = f.shape[2:] + (channels,) better_features = reconstruct_from_patches_2d(better_patches, better_shape) - f = (1.0 - args.balance) * current_feature[:,:channels]\ - + (0.0 + args.balance) * better_features.transpose((2, 0, 1))[np.newaxis] + f = (1.0 - balance) * current_feature[:,:channels]\ + + (0.0 + balance) * better_features.transpose((2, 0, 1))[np.newaxis] desired_feature = compute(f, self.content_map) if np.isnan(desired_feature).any(): @@ -614,13 +613,12 @@ def run(self): .format(ansi.BLUE_B, i, int(shape[1]*scale), int(shape[0]*scale), scale, ansi.BLUE)) # Precompute all necessary data for the various layers, put patches in place into augmented network. - self.model.setup(layers=['sem'+l for l in self.layers] + ['enc'+l for l in self.layers]) + self.model.setup(layers=['sem'+l for l in args.layers]) self.prepare_content(scale) self.prepare_style(scale) # Now setup the model with the new data, ready for the optimization loop. - # TODO: , 'out3_1' - self.model.setup(layers=['out4_1'] + ['sem'+l for l in self.layers] + ['enc'+l for l in self.layers]) + self.model.setup(layers=['sem'+l for l in args.layers] + ['out'+l for l in args.layers]) self.prepare_optimization() print('{}'.format(ansi.ENDC)) From 9af43f678c84806557021a3ba2fdfd7e9b66b54b Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Thu, 12 May 2016 10:46:30 +0200 Subject: [PATCH 12/58] Snap to grid depends on layers. Closes #87. 
--- doodle.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doodle.py b/doodle.py index 8a88392..5764923 100755 --- a/doodle.py +++ b/doodle.py @@ -19,14 +19,14 @@ import collections -# Configure all options first so we can custom load other libraries (Theano) based on device specified by user. +# Configure all options first so we can later custom-load other libraries (Theano) based on device specified by user. parser = argparse.ArgumentParser(description='Generate a new image by applying style onto a content image.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) add_arg = parser.add_argument add_arg('--content', default=None, type=str, help='Subject image path to repaint in new style.') add_arg('--style', default=None, type=str, help='Texture image path to extract patches from.') -add_arg('--balance', default=1.0, type=float, help='Weight of style relative to content.') +add_arg('--balance', default=[1.0], nargs='+', type=float, help='Weight of style relative to content.') add_arg('--variety', default=0.0, type=float, help='Bias toward selecting diverse patches, e.g. 0.5.') add_arg('--layers', default=['4_1'], nargs='+', type=str, help='The layer with which to match content.') add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') @@ -34,12 +34,12 @@ add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. features.') add_arg('--output', default='output.png', type=str, help='Output image path to save once done.') add_arg('--output-size', default=None, type=str, help='Size of the output image, e.g. 
512x512.') +add_arg('--iterations', default=3, type=int, help='Number of iterations to run at each resolution.') add_arg('--phases', default=2, type=int, help='Number of image scales to process in phases.') add_arg('--slices', default=2, type=int, help='Split patches up into this number of batches.') add_arg('--cache', default=0, type=int, help='Whether to compute matches only once.') add_arg('--seed', default='content', type=str, help='Seed image path, "noise" or "content".') add_arg('--seed-range', default='16:240', type=str, help='Random colors chosen in range, e.g. 0:255.') -add_arg('--iterations', default=3, type=int, help='Number of iterations to run each resolution.') add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') add_arg('--print-every', default=1, type=int, help='How often to log statistics to stdout.') add_arg('--save-every', default=1, type=int, help='How frequently to save PNG into `frames`.') @@ -334,10 +334,9 @@ def compute_norms(self, backend, layer, array): return [ni] + [ns] def normalize_components(self, layer, array, norms): - if args.balance > 0.0: - array[:,:self.model.channels[layer]] /= (norms[0] * 3.0) if args.semantic_weight > 0.0: array[:,self.model.channels[layer]:] /= (norms[1] * args.semantic_weight) + array[:,:self.model.channels[layer]] /= (norms[0] * 3.0) #------------------------------------------------------------------------------------------------------------------ @@ -347,8 +346,9 @@ def normalize_components(self, layer, array, norms): def rescale_image(self, img, scale): """Re-implementing skimage.transform.scale without the extra dependency. Saves a lot of space and hassle! 
""" + def snap(value, grid=2**(int(args.layers[0][0])-1)): return int(grid * math.floor(value / grid)) output = scipy.misc.toimage(img, cmin=0.0, cmax=255) - output.thumbnail((int(output.size[0]*scale), int(output.size[1]*scale)), PIL.Image.ANTIALIAS) + output.thumbnail((snap(output.size[0]*scale), snap(output.size[1]*scale)), PIL.Image.ANTIALIAS) return np.asarray(output) def prepare_content(self, scale=1.0): From 9c7125a72d0c30b7d82be0b8b188427910391f66 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Thu, 12 May 2016 19:43:32 +0200 Subject: [PATCH 13/58] Reworking arguments to support nargs and lists, adding model configuration for 5_1 to load new data. --- doodle.py | 47 ++++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/doodle.py b/doodle.py index 5764923..a9322e7 100755 --- a/doodle.py +++ b/doodle.py @@ -27,17 +27,16 @@ add_arg('--content', default=None, type=str, help='Subject image path to repaint in new style.') add_arg('--style', default=None, type=str, help='Texture image path to extract patches from.') add_arg('--balance', default=[1.0], nargs='+', type=float, help='Weight of style relative to content.') -add_arg('--variety', default=0.0, type=float, help='Bias toward selecting diverse patches, e.g. 0.5.') +add_arg('--variety', default=[0.0], nargs='+', type=float, help='Bias toward selecting diverse patches, e.g. 0.5.') add_arg('--layers', default=['4_1'], nargs='+', type=str, help='The layer with which to match content.') add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. 
features.') -add_arg('--output', default='output.png', type=str, help='Output image path to save once done.') +add_arg('--output', default='output.png', type=str, help='Filename or path to save output once done.') add_arg('--output-size', default=None, type=str, help='Size of the output image, e.g. 512x512.') add_arg('--iterations', default=3, type=int, help='Number of iterations to run at each resolution.') add_arg('--phases', default=2, type=int, help='Number of image scales to process in phases.') add_arg('--slices', default=2, type=int, help='Split patches up into this number of batches.') -add_arg('--cache', default=0, type=int, help='Whether to compute matches only once.') add_arg('--seed', default='content', type=str, help='Seed image path, "noise" or "content".') add_arg('--seed-range', default='16:240', type=str, help='Random colors chosen in range, e.g. 0:255.') add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') @@ -119,7 +118,7 @@ def setup_model(self, previous=None): net, self.channels = {}, {} net['map'] = InputLayer((1, 1, None, None)) - for j in range(4): + for j in range(5): net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad') @@ -151,10 +150,18 @@ def DecvLayer(copy, previous, channels, **params): net['enc3_3'] = ConvLayer(net['enc3_2'], 128, 3, pad=1, **custom) net['enc3_4'] = ConvLayer(net['enc3_3'], 128, 3, pad=1, **custom) net['enc4_1'] = ConvLayer(net['enc3_4'], 256, 2, pad=0, stride=(2,2), **custom) + net['enc4_2'] = ConvLayer(net['enc4_1'], 256, 3, pad=1, **custom) + net['enc4_3'] = ConvLayer(net['enc4_2'], 256, 3, pad=1, **custom) + net['enc4_4'] = ConvLayer(net['enc4_3'], 256, 3, pad=1, **custom) + net['enc5_1'] = ConvLayer(net['enc4_4'], 512, 2, pad=0, stride=(2,2), **custom) # Decoder part of the neural network, takes abstract patterns and converts them into an image! 
self.tensor_latent = [] - net['dec4_1'] = DecvLayer('4_1', net['enc4_1'], 128) + net['dec5_1'] = DecvLayer('5_1', net['enc5_1'], 256) + net['dec4_4'] = DecvLayer('4_4', net['dec5_1'], 256) + net['dec4_3'] = DecvLayer('4_3', net['dec4_4'], 256) + net['dec4_2'] = DecvLayer('4_2', net['dec4_3'], 256) + net['dec4_1'] = DecvLayer('4_1', net['dec4_2'], 128) net['dec3_4'] = DecvLayer('3_4', net['dec4_1'], 128) net['dec3_3'] = DecvLayer('3_3', net['dec3_4'], 128) net['dec3_2'] = DecvLayer('3_2', net['dec3_3'], 128) @@ -169,7 +176,7 @@ def DecvLayer(copy, previous, channels, **params): net['out'+l] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) # Auxiliary network for the semantic layers, and the nearest neighbors calculations. - for j, i in itertools.product(range(4), range(3)): + for j, i in itertools.product(range(5), range(4)): suffix = '%i_%i' % (j+1, i+1) if 'enc'+suffix not in net: continue @@ -244,7 +251,6 @@ def __init__(self): """Constructor sets up global variables, loads and validates files, then builds the model. """ self.start_time = time.time() - self.style_cache = {} # Prepare file output and load files specified as input. if args.save_every is not None: @@ -346,7 +352,7 @@ def normalize_components(self, layer, array, norms): def rescale_image(self, img, scale): """Re-implementing skimage.transform.scale without the extra dependency. Saves a lot of space and hassle! """ - def snap(value, grid=2**(int(args.layers[0][0])-1)): return int(grid * math.floor(value / grid)) + def snap(value, grid=2**int(args.layers[0][0])): return int(grid * math.floor(value / grid)) output = scipy.misc.toimage(img, cmin=0.0, cmax=255) output.thumbnail((snap(output.size[0]*scale), snap(output.size[1]*scale)), PIL.Image.ANTIALIAS) return np.asarray(output) @@ -440,7 +446,7 @@ def do_match_patches(self, layer): # Compute the score of each patch, taking into account statistics from previous iteration. 
This equalizes # the chances of the patches being selected when the user requests more variety. offset = self.matcher_history[layer].reshape((-1, 1)) - scores = dist - offset * args.variety + scores = dist - offset # Pick the best style patches for each patch in the current image, the result is an array of indices. # Also return the maximum value along both axis, used to compare slices and add patch variety. return [scores.argmax(axis=0), scores.max(axis=0), dist.max(axis=1)] @@ -511,10 +517,7 @@ def iterate_batches(self, *arrays, batch_size): excerpt = indices[index:index + batch_size] yield excerpt, [a[excerpt] for a in arrays] - def evaluate_slices(self, f, l): - if args.cache and l in self.style_cache: - return self.style_cache[l] - + def evaluate_slices(self, f, l, v): layer, data = self.model.network['nn'+l], self.style_data[l] history = data[-1] @@ -532,10 +535,8 @@ def evaluate_slices(self, f, l): best_idx[i] = idx[cur_idx[i]] best_val[i] = cur_val[i] - history[idx] = cur_match + history[idx] = cur_match * v - if args.cache: - self.style_cache[l] = best_idx return best_idx, best_val def evaluate(self, Xn): @@ -543,7 +544,7 @@ def evaluate(self, Xn): """ if args.print_every and self.frame % args.print_every == 0: - print('{:>3} {}layer{}'.format(self.frame, ansi.BOLD, ansi.ENDC), end='', flush=True) + print('{:>3} {}patches{}'.format(self.frame, ansi.BOLD, ansi.ENDC), end='', flush=True) # Adjust the representation to be compatible with the model before computing results. current_img = Xn.reshape(self.content_img.shape).astype(np.float32) / 127.5 - 1.0 @@ -552,21 +553,21 @@ def evaluate(self, Xn): # Iterate through each of the style layers one by one, computing best matches. 
desired_feature = current_features[0] - for l, balance, current_feature, compute in zip(args.layers, args.balance, current_features, self.compute_output): + for l, balance, variety, current_feature, compute in zip(args.layers, args.balance, args.variety, current_features, self.compute_output): f = np.copy(desired_feature) self.normalize_components(l, f, self.compute_norms(np, l, f)) self.matcher_tensors[l].set_value(f) # Compute best matching patches this style layer, going through all slices. - warmup = bool(self.iteration == 0 and args.variety > 0.0) + warmup = bool(self.iteration == 0 and variety > 0.0) for _ in range(2 if warmup else 1): - best_idx, best_val = self.evaluate_slices(f, l) + best_idx, best_val = self.evaluate_slices(f, l, variety) patches = self.style_data[l][0] - using = 100.0 * len(set(best_idx)) / best_idx.shape[0] - dupes = 100.0 * len([v for v in collections.Counter(best_idx).values() if v>1]) / best_idx.shape[0] + used = 100.0 * len(set(best_idx)) / best_idx.shape[0] + dups = 100.0 * len([v for v in collections.Counter(best_idx).values() if v>1]) / best_idx.shape[0] self.error = best_val.mean() - print(' {}{}{} patches {:2.0f}% dupes {:2.0f}% '.format(ansi.BOLD, l, ansi.ENDC, using, dupes), end='', flush=True) + print(' {}{}{} used {:2.0f}% dups {:2.0f}% '.format(ansi.BOLD, l, ansi.ENDC, used, dups), end='', flush=True) current_best = patches[best_idx].astype(np.float32) channels = self.model.channels[l] From 5fbc79f9ebb0994ccec1dac1d1a1afdf3bf45c9a Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Thu, 12 May 2016 21:16:31 +0200 Subject: [PATCH 14/58] Support for network that goes all the way up to 6_1. 
--- doodle.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/doodle.py b/doodle.py index a9322e7..c2c767f 100755 --- a/doodle.py +++ b/doodle.py @@ -118,13 +118,13 @@ def setup_model(self, previous=None): net, self.channels = {}, {} net['map'] = InputLayer((1, 1, None, None)) - for j in range(5): + for j in range(6): net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad') def DecvLayer(copy, previous, channels, **params): # Dynamically injects intermediate pitstop layers in the encoder based on what the user - # specified as layers. It's rather inelegant... Needs a rework! + # specified as layers. It's rather inelegant... Needs a rework! if copy in args.layers: if len(self.tensor_latent) > 0: l = self.tensor_latent[-1][0] @@ -154,10 +154,18 @@ def DecvLayer(copy, previous, channels, **params): net['enc4_3'] = ConvLayer(net['enc4_2'], 256, 3, pad=1, **custom) net['enc4_4'] = ConvLayer(net['enc4_3'], 256, 3, pad=1, **custom) net['enc5_1'] = ConvLayer(net['enc4_4'], 512, 2, pad=0, stride=(2,2), **custom) + net['enc5_2'] = ConvLayer(net['enc5_1'], 512, 3, pad=1, **custom) + net['enc5_3'] = ConvLayer(net['enc5_2'], 512, 3, pad=1, **custom) + net['enc5_4'] = ConvLayer(net['enc5_3'], 512, 3, pad=1, **custom) + net['enc6_1'] = ConvLayer(net['enc5_4'], 768, 2, pad=0, stride=(2,2), **custom) # Decoder part of the neural network, takes abstract patterns and converts them into an image! 
self.tensor_latent = [] - net['dec5_1'] = DecvLayer('5_1', net['enc5_1'], 256) + net['dec6_1'] = DecvLayer('6_1', net['enc6_1'], 512) + net['dec5_4'] = DecvLayer('5_4', net['dec6_1'], 512) + net['dec5_3'] = DecvLayer('5_3', net['dec5_4'], 512) + net['dec5_2'] = DecvLayer('5_2', net['dec5_3'], 512) + net['dec5_1'] = DecvLayer('5_1', net['dec5_2'], 256) net['dec4_4'] = DecvLayer('4_4', net['dec5_1'], 256) net['dec4_3'] = DecvLayer('4_3', net['dec4_4'], 256) net['dec4_2'] = DecvLayer('4_2', net['dec4_3'], 256) @@ -176,11 +184,11 @@ def DecvLayer(copy, previous, channels, **params): net['out'+l] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) # Auxiliary network for the semantic layers, and the nearest neighbors calculations. - for j, i in itertools.product(range(5), range(4)): + for j, i in itertools.product(range(6), range(4)): suffix = '%i_%i' % (j+1, i+1) if 'enc'+suffix not in net: continue - self.channels[suffix] = net['enc'+suffix].num_filters + self.channels[suffix] = net['enc'+suffix].num_filters if args.semantic_weight > 0.0: net['sem'+suffix] = ConcatLayer([net['enc'+suffix], net['map%i'%(j+1)]]) else: From f3edeabb2eceda42e8f1aad67b4a6c16f0133e76 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Fri, 13 May 2016 09:09:10 +0200 Subject: [PATCH 15/58] Layer-wise generation with micro-iterations to match patches. 
--- doodle.py | 61 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 36 insertions(+), 25 deletions(-) diff --git a/doodle.py b/doodle.py index c2c767f..0750a76 100755 --- a/doodle.py +++ b/doodle.py @@ -128,7 +128,10 @@ def DecvLayer(copy, previous, channels, **params): if copy in args.layers: if len(self.tensor_latent) > 0: l = self.tensor_latent[-1][0] - net['out'+l] = ConcatLayer([previous, net['map%i'%(int(l[0])-1)]]) + if args.semantic_weight > 0.0: + net['out'+l] = ConcatLayer([previous, net['map%i'%(int(l[0])-1)]]) + else: + net['out'+l] = previous self.tensor_latent.append((copy, T.tensor4())) net['lat'+copy] = InputLayer((1, previous.num_filters, None, None), var=self.tensor_latent[-1][1]) @@ -179,7 +182,7 @@ def DecvLayer(copy, previous, channels, **params): net['dec1_2'] = DecvLayer('1_2', net['dec2_1'], 32) net['dec1_1'] = DecvLayer('1_1', net['dec1_2'], 3, nonlinearity=lasagne.nonlinearities.tanh) net['dec0_0'] = lasagne.layers.ScaleLayer(net['dec1_1']) - + l = self.tensor_latent[-1][0] net['out'+l] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) @@ -294,7 +297,10 @@ def __init__(self): if self.content_img_original is None and args.output_size: shape = tuple([int(i) for i in args.output_size.split('x')]) else: - shape = self.style_img_original.shape[:2] + if self.content_img_original is None: + shape = self.style_img_original.shape[:2] + else: + shape = self.content_img_original.shape[:2] self.content_map_original = np.zeros(shape+(3,)) args.semantic_weight = 0.0 @@ -360,7 +366,7 @@ def normalize_components(self, layer, array, norms): def rescale_image(self, img, scale): """Re-implementing skimage.transform.scale without the extra dependency. Saves a lot of space and hassle! 
""" - def snap(value, grid=2**int(args.layers[0][0])): return int(grid * math.floor(value / grid)) + def snap(value, grid=2**(int(args.layers[0][0])-1)): return int(grid * math.floor(value / grid)) output = scipy.misc.toimage(img, cmin=0.0, cmax=255) output.thumbnail((snap(output.size[0]*scale), snap(output.size[1]*scale)), PIL.Image.ANTIALIAS) return np.asarray(output) @@ -562,29 +568,34 @@ def evaluate(self, Xn): desired_feature = current_features[0] for l, balance, variety, current_feature, compute in zip(args.layers, args.balance, args.variety, current_features, self.compute_output): - f = np.copy(desired_feature) - self.normalize_components(l, f, self.compute_norms(np, l, f)) - self.matcher_tensors[l].set_value(f) - - # Compute best matching patches this style layer, going through all slices. - warmup = bool(self.iteration == 0 and variety > 0.0) - for _ in range(2 if warmup else 1): - best_idx, best_val = self.evaluate_slices(f, l, variety) - - patches = self.style_data[l][0] - used = 100.0 * len(set(best_idx)) / best_idx.shape[0] - dups = 100.0 * len([v for v in collections.Counter(best_idx).values() if v>1]) / best_idx.shape[0] - self.error = best_val.mean() - print(' {}{}{} used {:2.0f}% dups {:2.0f}% '.format(ansi.BOLD, l, ansi.ENDC, used, dups), end='', flush=True) - current_best = patches[best_idx].astype(np.float32) - channels = self.model.channels[l] - better_patches = current_best[:,:channels].transpose((0, 2, 3, 1)) - better_shape = f.shape[2:] + (channels,) - better_features = reconstruct_from_patches_2d(better_patches, better_shape) + f = np.copy(desired_feature) - f = (1.0 - balance) * current_feature[:,:channels]\ - + (0.0 + balance) * better_features.transpose((2, 0, 1))[np.newaxis] + for j in range(2): + self.normalize_components(l, f, self.compute_norms(np, l, f)) + self.matcher_tensors[l].set_value(f) + + # Compute best matching patches this style layer, going through all slices. 
+ warmup = bool(self.iteration == 0 and variety > 0.0 and j == 0) + for _ in range(2 if warmup else 1): + best_idx, best_val = self.evaluate_slices(f, l, variety) + + patches = self.style_data[l][0] + used = 100.0 * len(set(best_idx)) / best_idx.shape[0] + dups = 100.0 * len([v for v in collections.Counter(best_idx).values() if v>1]) / best_idx.shape[0] + self.error = best_val.mean() + print(' {}{}{} used {:2.0f}% dups {:2.0f}% '.format(ansi.BOLD, l, ansi.ENDC, used, dups), end='', flush=True) + current_best = patches[best_idx].astype(np.float32) + + better_patches = current_best.transpose((0, 2, 3, 1)) + better_features = reconstruct_from_patches_2d(better_patches, f.shape[2:] + (f.shape[1],)) + better_features = better_features.astype(np.float32).transpose((2, 0, 1))[np.newaxis] + f = better_features + print('') + assert f.shape == desired_feature.shape + + f = (1.0 - balance) * current_feature[:,:channels] + (0.0 + balance) * better_features[:,:channels] + print(f.shape, self.content_map.shape) desired_feature = compute(f, self.content_map) if np.isnan(desired_feature).any(): From 9a188b8ba3a48296b46a7a64472435f07a4d3d68 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Fri, 13 May 2016 11:08:59 +0200 Subject: [PATCH 16/58] Fully switching to a layerwise rather than iterative approach, removing un-used code. --- doodle.py | 152 ++++++++++++++++++++---------------------------------- 1 file changed, 57 insertions(+), 95 deletions(-) diff --git a/doodle.py b/doodle.py index 0750a76..23777ff 100755 --- a/doodle.py +++ b/doodle.py @@ -29,19 +29,16 @@ add_arg('--balance', default=[1.0], nargs='+', type=float, help='Weight of style relative to content.') add_arg('--variety', default=[0.0], nargs='+', type=float, help='Bias toward selecting diverse patches, e.g. 
0.5.') add_arg('--layers', default=['4_1'], nargs='+', type=str, help='The layer with which to match content.') +add_arg('--iterations', default=[2], nargs='+', type=int, help='Number of iterations to run at each resolution.') add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. features.') add_arg('--output', default='output.png', type=str, help='Filename or path to save output once done.') add_arg('--output-size', default=None, type=str, help='Size of the output image, e.g. 512x512.') -add_arg('--iterations', default=3, type=int, help='Number of iterations to run at each resolution.') -add_arg('--phases', default=2, type=int, help='Number of image scales to process in phases.') add_arg('--slices', default=2, type=int, help='Split patches up into this number of batches.') add_arg('--seed', default='content', type=str, help='Seed image path, "noise" or "content".') add_arg('--seed-range', default='16:240', type=str, help='Random colors chosen in range, e.g. 0:255.') add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') -add_arg('--print-every', default=1, type=int, help='How often to log statistics to stdout.') -add_arg('--save-every', default=1, type=int, help='How frequently to save PNG into `frames`.') args = parser.parse_args() @@ -264,8 +261,6 @@ def __init__(self): self.start_time = time.time() # Prepare file output and load files specified as input. - if args.save_every is not None: - os.makedirs('frames', exist_ok=True) if args.output is not None and os.path.isfile(args.output): os.remove(args.output) @@ -556,9 +551,7 @@ def evaluate_slices(self, f, l, v): def evaluate(self, Xn): """Feed-forward evaluation of the output based on current image. Can be called multiple times. 
""" - - if args.print_every and self.frame % args.print_every == 0: - print('{:>3} {}patches{}'.format(self.frame, ansi.BOLD, ansi.ENDC), end='', flush=True) + frame = 0 # Adjust the representation to be compatible with the model before computing results. current_img = Xn.reshape(self.content_img.shape).astype(np.float32) / 127.5 - 1.0 @@ -567,119 +560,88 @@ def evaluate(self, Xn): # Iterate through each of the style layers one by one, computing best matches. desired_feature = current_features[0] - for l, balance, variety, current_feature, compute in zip(args.layers, args.balance, args.variety, current_features, self.compute_output): + for l, iterations, balance, variety, current_feature, compute in\ + zip(args.layers, args.iterations, args.balance, args.variety, current_features, self.compute_output): + + print('{}patches {}{}'.format(ansi.BOLD, l, ansi.ENDC)) + + self.iter_time = time.time() channels = self.model.channels[l] f = np.copy(desired_feature) - for j in range(2): + for j in range(iterations): self.normalize_components(l, f, self.compute_norms(np, l, f)) self.matcher_tensors[l].set_value(f) # Compute best matching patches this style layer, going through all slices. 
- warmup = bool(self.iteration == 0 and variety > 0.0 and j == 0) + warmup = bool(j == 0 and variety > 0.0) for _ in range(2 if warmup else 1): best_idx, best_val = self.evaluate_slices(f, l, variety) patches = self.style_data[l][0] - used = 100.0 * len(set(best_idx)) / best_idx.shape[0] - dups = 100.0 * len([v for v in collections.Counter(best_idx).values() if v>1]) / best_idx.shape[0] - self.error = best_val.mean() - print(' {}{}{} used {:2.0f}% dups {:2.0f}% '.format(ansi.BOLD, l, ansi.ENDC, used, dups), end='', flush=True) current_best = patches[best_idx].astype(np.float32) better_patches = current_best.transpose((0, 2, 3, 1)) better_features = reconstruct_from_patches_2d(better_patches, f.shape[2:] + (f.shape[1],)) better_features = better_features.astype(np.float32).transpose((2, 0, 1))[np.newaxis] f = better_features - print('') - assert f.shape == desired_feature.shape - - f = (1.0 - balance) * current_feature[:,:channels] + (0.0 + balance) * better_features[:,:channels] - print(f.shape, self.content_map.shape) - desired_feature = compute(f, self.content_map) - if np.isnan(desired_feature).any(): - raise OverflowError("Optimization diverged; try using a different device or parameters.") - # Dump the image to disk if requested by the user. - if args.save_every and self.frame % args.save_every == 0: - frame = Xn.reshape(self.content_img.shape[1:]) - resolution = self.content_img_original.shape - image = scipy.misc.toimage(self.model.finalize_image(frame, resolution), cmin=0, cmax=255) - image.save('frames/%04d.png'%self.frame) + used = 100.0 * len(set(best_idx)) / best_idx.shape[0] + dups = 100.0 * len([v for v in collections.Counter(best_idx).values() if v>1]) / best_idx.shape[0] + err = best_val.mean() + print('{:>3} used {:2.0f}% dups {:2.0f}% loss {:3.1f}'.format(frame, used, dups, err), end='') - # Print more information to the console every few iterations. 
- if args.print_every and self.frame % args.print_every == 0: - current_time = time.time() - print(' {}time{} {:3.1f}s '.format(ansi.BOLD, ansi.ENDC, current_time - self.iter_time), flush=True) - self.iter_time = current_time + current_time = time.time() + print(' {}time{} {:3.1f}s '.format(ansi.BOLD, ansi.ENDC, current_time - self.iter_time), flush=True) + self.iter_time = current_time + frame += 1 - # Update counters and timers. - self.frame += 1 - self.iteration += 1 + f = (1.0 - balance) * current_feature[:,:channels] + (0.0 + balance) * better_features[:,:channels] + desired_feature = compute(f, self.content_map) return desired_feature def run(self): """The main entry point for the application, runs through multiple phases at increasing resolutions. """ - self.frame, Xn = 0, None - for i in range(args.phases): - self.error = 255.0 - scale = 1.0 / 2.0 ** (args.phases - 1 - i) - - shape = self.content_img_original.shape - print('\n{}Phase #{}: resolution {}x{} scale {}{}'\ - .format(ansi.BLUE_B, i, int(shape[1]*scale), int(shape[0]*scale), scale, ansi.BLUE)) - - # Precompute all necessary data for the various layers, put patches in place into augmented network. - self.model.setup(layers=['sem'+l for l in args.layers]) - self.prepare_content(scale) - self.prepare_style(scale) - - # Now setup the model with the new data, ready for the optimization loop. - self.model.setup(layers=['sem'+l for l in args.layers] + ['out'+l for l in args.layers]) - self.prepare_optimization() - print('{}'.format(ansi.ENDC)) - - # Setup the seed for the optimization as specified by the user. 
- shape = self.content_img.shape[2:] - if args.seed == 'content': - Xn = (self.content_img[0] + 1.0) * 127.5 - elif args.seed == 'noise': - bounds = [int(i) for i in args.seed_range.split(':')] - Xn = np.random.uniform(bounds[0], bounds[1], shape + (3,)).astype(np.float32) - elif args.seed == 'previous': - Xn = scipy.misc.imresize(Xn[0], shape, interp='bicubic') - Xn = Xn.transpose((2, 0, 1))[np.newaxis] - elif os.path.exists(args.seed): - seed_image = scipy.ndimage.imread(args.seed, mode='RGB') - seed_image = scipy.misc.imresize(seed_image, shape, interp='bicubic') - self.seed_image = self.model.prepare_image(seed_image) - Xn = (self.seed_image[0] + 1.0) * 127.5 - if Xn is None: - error("Seed for optimization was not found. You can either...", - " - Set the `--seed` to `content` or `noise`.", " - Specify `--seed` as a valid filename.") - - # Optimization algorithm needs min and max bounds to prevent divergence. - data_bounds = np.zeros((np.product(Xn.shape), 2), dtype=np.float64) - data_bounds[:] = (0.0, 255.0) - - self.iter_time, self.iteration, interrupt = time.time(), 0, False - for _ in range(args.iterations): - Xn = self.evaluate(Xn) - - args.seed = 'previous' - resolution = self.content_img.shape - Xn = Xn.reshape(resolution) - - output = self.model.finalize_image(Xn[0], self.content_img_original.shape) - scipy.misc.toimage(output, cmin=0, cmax=255).save(args.output) - - interrupt = False - status = "finished in" if not interrupt else "interrupted at" - print('\n{}Optimization {} {:3.1f}s, average patch error {:3.1f}!{}\n'\ - .format(ansi.CYAN, status, time.time() - self.start_time, self.error, ansi.ENDC)) + self.frame = 0 + + shape = self.content_img_original.shape + print('\n{}Resolution {}x{}{}'\ + .format(ansi.BLUE_B, shape[1], shape[0], ansi.BLUE)) + + # Precompute all necessary data for the various layers, put patches in place into augmented network. 
+ self.model.setup(layers=['sem'+l for l in args.layers]) + self.prepare_content() + self.prepare_style() + + # Now setup the model with the new data, ready for the optimization loop. + self.model.setup(layers=['sem'+l for l in args.layers] + ['out'+l for l in args.layers]) + self.prepare_optimization() + print('{}'.format(ansi.ENDC)) + + # Setup the seed for the optimization as specified by the user. + shape = self.content_img.shape[2:] + if args.seed == 'content': + Xn = (self.content_img[0] + 1.0) * 127.5 + elif args.seed == 'noise': + bounds = [int(i) for i in args.seed_range.split(':')] + Xn = np.random.uniform(bounds[0], bounds[1], shape + (3,)).astype(np.float32) + elif os.path.exists(args.seed): + seed_image = scipy.ndimage.imread(args.seed, mode='RGB') + seed_image = scipy.misc.imresize(seed_image, shape, interp='bicubic') + self.seed_image = self.model.prepare_image(seed_image) + Xn = (self.seed_image[0] + 1.0) * 127.5 + if Xn is None: + error("Seed for optimization was not found. You can either...", + " - Set the `--seed` to `content` or `noise`.", " - Specify `--seed` as a valid filename.") + + Xn = self.evaluate(Xn) + output = self.model.finalize_image(Xn.reshape(self.content_img.shape[1:]), self.content_img_original.shape) + scipy.misc.toimage(output, cmin=0, cmax=255).save(args.output) + + print('\n{}Optimization finished in {:3.1f}s!{}\n'.format(ansi.CYAN, time.time()-self.start_time, ansi.ENDC)) if __name__ == "__main__": From 72361f879797fe89ae0e6a36b19059327c1b473d Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Fri, 13 May 2016 13:01:49 +0200 Subject: [PATCH 17/58] Simplifications to the iterations, better debug logging, reduce load times. 
--- doodle.py | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/doodle.py b/doodle.py index 23777ff..acba936 100755 --- a/doodle.py +++ b/doodle.py @@ -10,7 +10,6 @@ import os import sys -import bz2 import math import time import pickle @@ -202,18 +201,18 @@ def DecvLayer(copy, previous, channels, **params): def load_data(self): """Open the serialized parameters from a pre-trained network, and load them into the model created. """ - data_file = os.path.join(os.path.dirname(__file__), 'gelu2_conv.pkl.bz2') + data_file = os.path.join(os.path.dirname(__file__), 'gelu2_conv.pkl') if not os.path.exists(data_file): - error("Model file with pre-trained convolution layers not found. Download here...", + error("Model file with pre-trained convolution layers not found. Download here and extract...", "https://github.com/alexjc/neural-doodle/releases/download/v0.0/gelu2_conv.pkl.bz2") - data = pickle.load(bz2.open(data_file, 'rb')) + data = pickle.load(open(data_file, 'rb')) for layer, values in data.items(): assert layer in self.network, "Layer `{}` not found as expected.".format(layer) for p, v in zip(self.network[layer].get_params(), values): assert p.get_value().shape == v.shape, "Layer `{}` in network has size {} but data is {}."\ .format(layer, v.shape, p.get_value().shape) - p.set_value(v) + p.set_value(v.astype(np.float32)) def setup(self, layers): """Setup the inputs and outputs, knowing the layers that are required by the optimization algorithm. @@ -552,31 +551,33 @@ def evaluate(self, Xn): """Feed-forward evaluation of the output based on current image. Can be called multiple times. """ frame = 0 + iter_time = time.time() # Adjust the representation to be compatible with the model before computing results. 
current_img = Xn.reshape(self.content_img.shape).astype(np.float32) / 127.5 - 1.0 current_features = self.compute_features(current_img, self.content_map) + print('{}seed{} {:3.1f}s'.format(ansi.BOLD, ansi.ENDC, time.time() - iter_time)) + # Iterate through each of the style layers one by one, computing best matches. desired_feature = current_features[0] for l, iterations, balance, variety, current_feature, compute in\ zip(args.layers, args.iterations, args.balance, args.variety, current_features, self.compute_output): - print('{}patches {}{}'.format(ansi.BOLD, l, ansi.ENDC)) + iter_time = time.time() - self.iter_time = time.time() channels = self.model.channels[l] f = np.copy(desired_feature) + print('{}patches {}{}'.format(ansi.BOLD, l, ansi.ENDC)) + for j in range(iterations): self.normalize_components(l, f, self.compute_norms(np, l, f)) self.matcher_tensors[l].set_value(f) # Compute best matching patches this style layer, going through all slices. - warmup = bool(j == 0 and variety > 0.0) - for _ in range(2 if warmup else 1): - best_idx, best_val = self.evaluate_slices(f, l, variety) + best_idx, best_val = self.evaluate_slices(f, l, variety) patches = self.style_data[l][0] current_best = patches[best_idx].astype(np.float32) @@ -586,15 +587,13 @@ def evaluate(self, Xn): better_features = better_features.astype(np.float32).transpose((2, 0, 1))[np.newaxis] f = better_features - used = 100.0 * len(set(best_idx)) / best_idx.shape[0] dups = 100.0 * len([v for v in collections.Counter(best_idx).values() if v>1]) / best_idx.shape[0] - err = best_val.mean() - print('{:>3} used {:2.0f}% dups {:2.0f}% loss {:3.1f}'.format(frame, used, dups, err), end='') + err = 100.0 * best_val.mean() + print('{:>3} used {:2.0f}% dups {:2.0f}% match {:3.2e}'.format(frame, used, dups, err), end='') - current_time = time.time() - print(' {}time{} {:3.1f}s '.format(ansi.BOLD, ansi.ENDC, current_time - self.iter_time), flush=True) - self.iter_time = current_time + print(' {}time{} {:3.1f}s 
'.format(ansi.BOLD, ansi.ENDC, time.time() - iter_time), flush=True) + iter_time = time.time() frame += 1 f = (1.0 - balance) * current_feature[:,:channels] + (0.0 + balance) * better_features[:,:channels] From 9af96f5d89577d9984f33f67a5690a8036f03302 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Fri, 13 May 2016 15:37:05 +0200 Subject: [PATCH 18/58] Refined the logging, removed unused seeding code. --- doodle.py | 68 +++++++++++++++++++------------------------------------ 1 file changed, 23 insertions(+), 45 deletions(-) diff --git a/doodle.py b/doodle.py index acba936..c6ec6e2 100755 --- a/doodle.py +++ b/doodle.py @@ -28,15 +28,13 @@ add_arg('--balance', default=[1.0], nargs='+', type=float, help='Weight of style relative to content.') add_arg('--variety', default=[0.0], nargs='+', type=float, help='Bias toward selecting diverse patches, e.g. 0.5.') add_arg('--layers', default=['4_1'], nargs='+', type=str, help='The layer with which to match content.') -add_arg('--iterations', default=[2], nargs='+', type=int, help='Number of iterations to run at each resolution.') +add_arg('--iterations', default=[2], nargs='+', type=int, help='Number of iterations to run at each resolution.') add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. features.') add_arg('--output', default='output.png', type=str, help='Filename or path to save output once done.') add_arg('--output-size', default=None, type=str, help='Size of the output image, e.g. 
512x512.') add_arg('--slices', default=2, type=int, help='Split patches up into this number of batches.') -add_arg('--seed', default='content', type=str, help='Seed image path, "noise" or "content".') -add_arg('--seed-range', default='16:240', type=str, help='Random colors chosen in range, e.g. 0:255.') add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') args = parser.parse_args() @@ -304,7 +302,7 @@ def __init__(self): args.semantic_weight = 0.0 if self.content_img_original is None: - self.content_img_original = np.zeros(self.content_map_original.shape[:2]+(3,)) + self.content_img_original = np.random.uniform(0, 255, self.content_map_original.shape[:2]+(3,)).astype(np.float32) args.content_weight = 0.0 if self.content_map_original.shape[2] != self.style_map_original.shape[2]: @@ -374,6 +372,14 @@ def prepare_content(self, scale=1.0): content_map = self.rescale_image(self.content_map_original, scale) self.content_map = content_map.transpose((2, 0, 1))[np.newaxis].astype(np.float32) + # Feed-forward calculation only, returns the result of the convolution post-activation + self.compute_features = self.compile([self.model.tensor_img, self.model.tensor_map], + self.model.get_outputs('sem', args.layers)) + + self.content_features = self.compute_features(self.content_img, self.content_map) + for layer, current in zip(args.layers, self.content_features): + print(' - Layer {} as {} array in {:,}kb.'.format(layer, current.shape[1:], current.size//1000)) + def prepare_style(self, scale=1.0): """Called each phase of the optimization, process the style image according to the scale, then run it through the model to extract intermediate outputs (e.g. sem4_1) and turn them into patches. 
@@ -397,17 +403,13 @@ def prepare_style(self, scale=1.0): l.num_filters = patches.shape[0] // args.slices self.style_data[layer] = [d[:l.num_filters*args.slices].astype(np.float16) for d in data]\ + [np.zeros((patches.shape[0],), dtype=np.float16)] - print(' - Style layer {}: {} patches in {:,}kb.'.format(layer, patches.shape, patches.size//1000)) + print(' - Layer {} as {} patches {} in {:,}kb.'.format(layer, patches.shape[:2], patches.shape[2:], patches.size//1000)) def prepare_optimization(self): """Optimization requires a function to compute the error (aka. loss) which is done in multiple components. Here we compile a function to run on the GPU that returns all components separately. """ - # Feed-forward calculation only, returns the result of the convolution post-activation - self.compute_features = self.compile([self.model.tensor_img, self.model.tensor_map], - self.model.get_outputs('sem', args.layers)) - # Patch matching calculation that uses only pre-calculated features and a slice of the patches. self.matcher_tensors = {l: lasagne.utils.shared_empty(dim=4) for l in args.layers} self.matcher_history = {l: T.vector() for l in args.layers} @@ -551,26 +553,17 @@ def evaluate(self, Xn): """Feed-forward evaluation of the output based on current image. Can be called multiple times. """ frame = 0 - iter_time = time.time() - - # Adjust the representation to be compatible with the model before computing results. - current_img = Xn.reshape(self.content_img.shape).astype(np.float32) / 127.5 - 1.0 - current_features = self.compute_features(current_img, self.content_map) - - print('{}seed{} {:3.1f}s'.format(ansi.BOLD, ansi.ENDC, time.time() - iter_time)) # Iterate through each of the style layers one by one, computing best matches. 
- desired_feature = current_features[0] - + desired_feature = self.content_features[0] for l, iterations, balance, variety, current_feature, compute in\ - zip(args.layers, args.iterations, args.balance, args.variety, current_features, self.compute_output): + zip(args.layers, args.iterations, args.balance, args.variety, self.content_features, self.compute_output): iter_time = time.time() - channels = self.model.channels[l] f = np.copy(desired_feature) - print('{}patches {}{}'.format(ansi.BOLD, l, ansi.ENDC)) + print('\n{}Phase {}{}: variety {} balance {}{}'.format(ansi.CYAN_B, l, ansi.CYAN, variety, balance, ansi.ENDC)) for j in range(iterations): self.normalize_components(l, f, self.compute_norms(np, l, f)) @@ -589,10 +582,9 @@ def evaluate(self, Xn): used = 100.0 * len(set(best_idx)) / best_idx.shape[0] dups = 100.0 * len([v for v in collections.Counter(best_idx).values() if v>1]) / best_idx.shape[0] - err = 100.0 * best_val.mean() - print('{:>3} used {:2.0f}% dups {:2.0f}% match {:3.2e}'.format(frame, used, dups, err), end='') + err = best_val.mean() + print('{:>3} {}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'.format(frame, ansi.BOLD, ansi.ENDC, used, dups, ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) - print(' {}time{} {:3.1f}s '.format(ansi.BOLD, ansi.ENDC, time.time() - iter_time), flush=True) iter_time = time.time() frame += 1 @@ -607,36 +599,22 @@ def run(self): self.frame = 0 shape = self.content_img_original.shape - print('\n{}Resolution {}x{}{}'\ - .format(ansi.BLUE_B, shape[1], shape[0], ansi.BLUE)) + print('\n{}Content {}x{}{}'.format(ansi.BLUE_B, shape[1], shape[0], ansi.BLUE)) # Precompute all necessary data for the various layers, put patches in place into augmented network. 
self.model.setup(layers=['sem'+l for l in args.layers]) self.prepare_content() + + shape = self.style_img_original.shape + print('\n{}Style {}x{}{}'.format(ansi.BLUE_B, shape[1], shape[0], ansi.BLUE)) self.prepare_style() # Now setup the model with the new data, ready for the optimization loop. self.model.setup(layers=['sem'+l for l in args.layers] + ['out'+l for l in args.layers]) self.prepare_optimization() - print('{}'.format(ansi.ENDC)) - - # Setup the seed for the optimization as specified by the user. - shape = self.content_img.shape[2:] - if args.seed == 'content': - Xn = (self.content_img[0] + 1.0) * 127.5 - elif args.seed == 'noise': - bounds = [int(i) for i in args.seed_range.split(':')] - Xn = np.random.uniform(bounds[0], bounds[1], shape + (3,)).astype(np.float32) - elif os.path.exists(args.seed): - seed_image = scipy.ndimage.imread(args.seed, mode='RGB') - seed_image = scipy.misc.imresize(seed_image, shape, interp='bicubic') - self.seed_image = self.model.prepare_image(seed_image) - Xn = (self.seed_image[0] + 1.0) * 127.5 - if Xn is None: - error("Seed for optimization was not found. You can either...", - " - Set the `--seed` to `content` or `noise`.", " - Specify `--seed` as a valid filename.") - - Xn = self.evaluate(Xn) + print('{}'.format(ansi.ENDC), end='') + + Xn = self.evaluate((self.content_img[0] + 1.0) * 127.5) output = self.model.finalize_image(Xn.reshape(self.content_img.shape[1:]), self.content_img_original.shape) scipy.misc.toimage(output, cmin=0, cmax=255).save(args.output) From fbb318f5d0dc816ea81d29c99ee9bcc991d0d005 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Fri, 13 May 2016 19:29:50 +0200 Subject: [PATCH 19/58] Major improvements to display, ASCII logo too! All code in forward branch is now AGPL. 
--- doodle.py | 98 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 52 insertions(+), 46 deletions(-) diff --git a/doodle.py b/doodle.py index c6ec6e2..3014a13 100755 --- a/doodle.py +++ b/doodle.py @@ -1,8 +1,19 @@ #!/usr/bin/env python3 +""" _ _ _ _ + _ __ ___ _ _ _ __ __ _| | __| | ___ ___ __| | | ___ +| '_ \ / _ \ | | | '__/ _` | | / _` |/ _ \ / _ \ / _` | |/ _ \ +| | | | __/ |_| | | | (_| | | | (_| | (_) | (_) | (_| | | __/ +|_| |_|\___|\__,_|_| \__,_|_| \__,_|\___/ \___/ \__,_|_|\___| + +""" # -# Neural Doodle! # Copyright (c) 2016, Alex J. Champandard. # +# Neural Doodle is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General +# Public License version 3. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# +# # Research and Development sponsored by the nucl.ai Conference! # http://events.nucl.ai/ # July 18-20, 2016 in Vienna/Austria. @@ -15,21 +26,19 @@ import pickle import argparse import itertools -import collections # Configure all options first so we can later custom-load other libraries (Theano) based on device specified by user. 
parser = argparse.ArgumentParser(description='Generate a new image by applying style onto a content image.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) add_arg = parser.add_argument - add_arg('--content', default=None, type=str, help='Subject image path to repaint in new style.') add_arg('--style', default=None, type=str, help='Texture image path to extract patches from.') +add_arg('--layers', default=['6_1','5_1','4_1'], nargs='+', type=str, help='The layers/scales to process.') +add_arg('--variety', default=[0.2, 0.1, 0.0], nargs='+', type=float, help='Bias selection of diverse patches') add_arg('--balance', default=[1.0], nargs='+', type=float, help='Weight of style relative to content.') -add_arg('--variety', default=[0.0], nargs='+', type=float, help='Bias toward selecting diverse patches, e.g. 0.5.') -add_arg('--layers', default=['4_1'], nargs='+', type=str, help='The layer with which to match content.') -add_arg('--iterations', default=[2], nargs='+', type=int, help='Number of iterations to run at each resolution.') -add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') +add_arg('--iterations', default=[6,4,2], nargs='+', type=int, help='Number of iterations to run at each resolution.') +add_arg('--shapes', default=[3,3,2], nargs='+', type=int, help='Size of kernels used for patch extraction.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. features.') add_arg('--output', default='output.png', type=str, help='Filename or path to save output once done.') @@ -41,7 +50,7 @@ #---------------------------------------------------------------------------------------------------------------------- -# Color coded output helps visualize the information a little better, plus looks cool! 
+# Color coded output helps visualize the information a little better, plus it looks cool! class ansi: BOLD = '\033[1;97m' WHITE = '\033[0;97m' @@ -54,21 +63,24 @@ class ansi: CYAN = '\033[0;36m' CYAN_B = '\033[1;36m' ENDC = '\033[0m' - + def error(message, *lines): string = "\n{}ERROR: " + message + "{}\n" + "\n".join(lines) + "{}\n" print(string.format(ansi.RED_B, ansi.RED, ansi.ENDC)) sys.exit(-1) -print("""{}NOTICE: This is R&D in progress. Terms and Conditions:{} +def extend(lst): + return itertools.chain(lst, itertools.repeat(lst[-1])) + +print("""{}NOTICE: This R&D branch is in progress. Terms and Conditions:{} - Trained models are for non-commercial use, no redistribution. - - For derived/inspired research, please cite this project.\n{}""".format(ansi.YELLOW_B, ansi.YELLOW, ansi.ENDC)) + - For derived/inspired research, please cite this project.{}""".format(ansi.YELLOW_B, ansi.YELLOW, ansi.ENDC)) -print('{}Neural Doodle for semantic style transfer.{}'.format(ansi.CYAN_B, ansi.ENDC)) +print('{} {}High-quality image synthesis powered by Deep Learning!{}'.format(ansi.CYAN_B, __doc__, ansi.ENDC)) # Load the underlying deep learning libraries based on the device specified. If you specify THEANO_FLAGS manually, # the code assumes you know what you are doing and they are not overriden! 
-os.environ.setdefault('THEANO_FLAGS', 'floatX=float32,device={},force_device=True,'\ +os.environ.setdefault('THEANO_FLAGS', 'floatX=float32,device={},force_device=True,allow_gc=True,'\ 'print_active_device=False'.format(args.device)) # Scientific & Imaging Libraries @@ -91,7 +103,7 @@ def error(message, *lines): from lasagne.layers import Conv2DLayer as ConvLayer, Deconv2DLayer as DeconvLayer, Pool2DLayer as PoolLayer from lasagne.layers import InputLayer, ConcatLayer -print('{} - Using device `{}` for processing the images.{}'.format(ansi.CYAN, theano.config.device, ansi.ENDC)) +print('{} - Using the device `{}` for heavy computation.{}'.format(ansi.CYAN, theano.config.device, ansi.ENDC)) #---------------------------------------------------------------------------------------------------------------------- @@ -201,7 +213,7 @@ def load_data(self): """ data_file = os.path.join(os.path.dirname(__file__), 'gelu2_conv.pkl') if not os.path.exists(data_file): - error("Model file with pre-trained convolution layers not found. Download here and extract...", + error("Model file with pre-trained convolution layers not found. Download here and unzip...", "https://github.com/alexjc/neural-doodle/releases/download/v0.0/gelu2_conv.pkl.bz2") data = pickle.load(open(data_file, 'rb')) @@ -391,7 +403,7 @@ def prepare_style(self, scale=1.0): self.style_map = style_map.transpose((2, 0, 1))[np.newaxis].astype(np.float32) # Compile a function to run on the GPU to extract patches for all layers at once. 
- layer_patches = self.do_extract_patches(args.layers, self.model.get_outputs('sem', args.layers), args.shapes) + layer_patches = self.do_extract_patches(args.layers, self.model.get_outputs('sem', args.layers), extend(args.shapes)) extractor = self.compile([self.model.tensor_img, self.model.tensor_map], layer_patches) result = extractor(self.style_img, self.style_map) @@ -416,10 +428,9 @@ def prepare_optimization(self): self.matcher_inputs = {self.model.network['dup'+l]: self.matcher_tensors[l] for l in args.layers} nn_layers = [self.model.network['nn'+l] for l in args.layers] self.matcher_outputs = dict(zip(args.layers, lasagne.layers.get_output(nn_layers, self.matcher_inputs))) + self.compute_matches = {l: self.compile([self.matcher_history[l]], self.do_match_patches(l)) for l in args.layers} - self.compute_matches = {l: self.compile([self.matcher_history[l]], self.do_match_patches(l))\ - for l in args.layers} - + # Decoding intermediate features into more specialized features and all the way to the output image. self.compute_output = [] for layer, (_, tensor_latent) in zip(args.layers, self.model.tensor_latent): output = lasagne.layers.get_output(self.model.network['out'+layer], @@ -470,7 +481,6 @@ def content_loss(self): """Return a list of Theano expressions for the error function, measuring how different the current image is from the reference content that was loaded. """ - content_loss = [] if args.content_weight == 0.0: return content_loss @@ -544,7 +554,6 @@ def evaluate_slices(self, f, l, v): i = np.where(cur_val > best_val) best_idx[i] = idx[cur_idx[i]] best_val[i] = cur_val[i] - history[idx] = cur_match * v return best_idx, best_val @@ -552,43 +561,42 @@ def evaluate_slices(self, f, l, v): def evaluate(self, Xn): """Feed-forward evaluation of the output based on current image. Can be called multiple times. 
""" - frame = 0 + frame = 1 + parameters = zip(args.layers, extend(args.iterations), extend(args.balance), extend(args.variety)) # Iterate through each of the style layers one by one, computing best matches. - desired_feature = self.content_features[0] - for l, iterations, balance, variety, current_feature, compute in\ - zip(args.layers, args.iterations, args.balance, args.variety, self.content_features, self.compute_output): + desired_feature = np.copy(self.content_features[0]) + for parameter, current_feature, compute in zip(parameters, self.content_features, self.compute_output): + l, iterations, balance, variety = parameter - iter_time = time.time() - channels = self.model.channels[l] - f = np.copy(desired_feature) - - print('\n{}Phase {}{}: variety {} balance {}{}'.format(ansi.CYAN_B, l, ansi.CYAN, variety, balance, ansi.ENDC)) + print('\n{}Phase {}{}: variety {}, balance {}, iterations {}.{}'\ + .format(ansi.CYAN_B, l, ansi.CYAN, variety, balance, iterations, ansi.ENDC)) + channels, iter_time = self.model.channels[l], time.time() for j in range(iterations): - self.normalize_components(l, f, self.compute_norms(np, l, f)) - self.matcher_tensors[l].set_value(f) + self.normalize_components(l, desired_feature, self.compute_norms(np, l, desired_feature)) + self.matcher_tensors[l].set_value(desired_feature) # Compute best matching patches this style layer, going through all slices. 
- best_idx, best_val = self.evaluate_slices(f, l, variety) + best_idx, best_val = self.evaluate_slices(desired_feature, l, variety) patches = self.style_data[l][0] current_best = patches[best_idx].astype(np.float32) better_patches = current_best.transpose((0, 2, 3, 1)) - better_features = reconstruct_from_patches_2d(better_patches, f.shape[2:] + (f.shape[1],)) - better_features = better_features.astype(np.float32).transpose((2, 0, 1))[np.newaxis] - f = better_features + better_shape = desired_feature.shape[2:] + (desired_feature.shape[1],) + better_features = reconstruct_from_patches_2d(better_patches, better_shape) + desired_feature = better_features.astype(np.float32).transpose((2, 0, 1))[np.newaxis] - used = 100.0 * len(set(best_idx)) / best_idx.shape[0] - dups = 100.0 * len([v for v in collections.Counter(best_idx).values() if v>1]) / best_idx.shape[0] + used = 99.9 * len(set(best_idx)) / best_idx.shape[0] + dups = 99.9 * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] err = best_val.mean() - print('{:>3} {}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'.format(frame, ansi.BOLD, ansi.ENDC, used, dups, ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) + print('{:>3} {}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'.format(frame, ansi.BOLD, ansi.ENDC, used, dups, ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) iter_time = time.time() frame += 1 - f = (1.0 - balance) * current_feature[:,:channels] + (0.0 + balance) * better_features[:,:channels] + f = (1.0 - balance) * current_feature[:,:channels] + (0.0 + balance) * desired_feature[:,:channels] desired_feature = compute(f, self.content_map) return desired_feature @@ -596,17 +604,15 @@ def evaluate(self, Xn): def run(self): """The main entry point for the application, runs through multiple phases at increasing resolutions. 
""" - self.frame = 0 - + # Precompute all features from the content image based on layers specified. shape = self.content_img_original.shape - print('\n{}Content {}x{}{}'.format(ansi.BLUE_B, shape[1], shape[0], ansi.BLUE)) - - # Precompute all necessary data for the various layers, put patches in place into augmented network. + print('\n{}Content {}x{}{} at scale {:3.1f}'.format(ansi.BLUE_B, shape[1], shape[0], ansi.BLUE, 1.0)) self.model.setup(layers=['sem'+l for l in args.layers]) self.prepare_content() + # Extract style patches from the texture image as specified by the user. shape = self.style_img_original.shape - print('\n{}Style {}x{}{}'.format(ansi.BLUE_B, shape[1], shape[0], ansi.BLUE)) + print('\n{}Style {}x{}{} at scale {:3.1f}'.format(ansi.BLUE_B, shape[1], shape[0], ansi.BLUE, 1.0)) self.prepare_style() # Now setup the model with the new data, ready for the optimization loop. From 7c5374f525360a91d2bc8dc6e5830e2ab942f41e Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Fri, 13 May 2016 20:08:19 +0200 Subject: [PATCH 20/58] Message to download the new model file as uncompressed. --- doodle.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doodle.py b/doodle.py index 3014a13..353407b 100755 --- a/doodle.py +++ b/doodle.py @@ -213,8 +213,8 @@ def load_data(self): """ data_file = os.path.join(os.path.dirname(__file__), 'gelu2_conv.pkl') if not os.path.exists(data_file): - error("Model file with pre-trained convolution layers not found. Download here and unzip...", - "https://github.com/alexjc/neural-doodle/releases/download/v0.0/gelu2_conv.pkl.bz2") + error("Model file with pre-trained convolution layers not found. Download from here...", + "https://github.com/alexjc/neural-doodle/releases/download/v0.0/gelu2_conv.pkl") data = pickle.load(open(data_file, 'rb')) for layer, values in data.items(): From 35daa9ac9b3c44da72d6424572fbc66837b7d5bd Mon Sep 17 00:00:00 2001 From: "Alex J. 
Champandard" Date: Fri, 13 May 2016 23:16:25 +0200 Subject: [PATCH 21/58] Re-implementing visualization of intermediate frames, 50% slower. --- doodle.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/doodle.py b/doodle.py index 353407b..1790729 100755 --- a/doodle.py +++ b/doodle.py @@ -43,6 +43,7 @@ add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. features.') add_arg('--output', default='output.png', type=str, help='Filename or path to save output once done.') add_arg('--output-size', default=None, type=str, help='Size of the output image, e.g. 512x512.') +add_arg('--frames', default=False, action='store_true', help='Render intermediate frames, takes more time.') add_arg('--slices', default=2, type=int, help='Split patches up into this number of batches.') add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') args = parser.parse_args() @@ -561,11 +562,13 @@ def evaluate_slices(self, f, l, v): def evaluate(self, Xn): """Feed-forward evaluation of the output based on current image. Can be called multiple times. """ - frame = 1 + frame = 0 parameters = zip(args.layers, extend(args.iterations), extend(args.balance), extend(args.variety)) # Iterate through each of the style layers one by one, computing best matches. 
desired_feature = np.copy(self.content_features[0]) + self.render(frame, args.layers[0], self.content_features[0]) + for parameter, current_feature, compute in zip(parameters, self.content_features, self.compute_output): l, iterations, balance, variety = parameter @@ -593,14 +596,29 @@ def evaluate(self, Xn): err = best_val.mean() print('{:>3} {}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'.format(frame, ansi.BOLD, ansi.ENDC, used, dups, ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) - iter_time = time.time() frame += 1 + self.render(frame, l, desired_feature) + iter_time = time.time() f = (1.0 - balance) * current_feature[:,:channels] + (0.0 + balance) * desired_feature[:,:channels] desired_feature = compute(f, self.content_map) return desired_feature + def render(self, frame, layer, features): + if not args.frames: return + + found = False + for l, compute in zip(args.layers, self.compute_output): + if not found and layer != l: continue + found = True + features = compute(features[:,:self.model.channels[l]], self.content_map) + + output = self.model.finalize_image(features.reshape(self.content_img.shape[1:]), self.content_img_original.shape) + filename = os.path.basename(args.output) + scipy.misc.toimage(output, cmin=0, cmax=255).save('frames/{}-{:03d}-L{}.png'.format(filename, frame, layer[0])) + + def run(self): """The main entry point for the application, runs through multiple phases at increasing resolutions. """ From da1c3d663f778ef81b5458080f4d4c3dcf21cda3 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sat, 14 May 2016 10:37:53 +0200 Subject: [PATCH 22/58] The blending occurs every frame rather than between phases, fixing the --balance parameter. 
--- doodle.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doodle.py b/doodle.py index 1790729..c4c640c 100755 --- a/doodle.py +++ b/doodle.py @@ -590,6 +590,7 @@ def evaluate(self, Xn): better_shape = desired_feature.shape[2:] + (desired_feature.shape[1],) better_features = reconstruct_from_patches_2d(better_patches, better_shape) desired_feature = better_features.astype(np.float32).transpose((2, 0, 1))[np.newaxis] + desired_feature = (1.0 - balance) * current_feature + (0.0 + balance) * desired_feature used = 99.9 * len(set(best_idx)) / best_idx.shape[0] dups = 99.9 * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] @@ -600,8 +601,7 @@ def evaluate(self, Xn): self.render(frame, l, desired_feature) iter_time = time.time() - f = (1.0 - balance) * current_feature[:,:channels] + (0.0 + balance) * desired_feature[:,:channels] - desired_feature = compute(f, self.content_map) + desired_feature = compute(desired_feature[:,:channels], self.content_map) return desired_feature @@ -615,7 +615,7 @@ def render(self, frame, layer, features): features = compute(features[:,:self.model.channels[l]], self.content_map) output = self.model.finalize_image(features.reshape(self.content_img.shape[1:]), self.content_img_original.shape) - filename = os.path.basename(args.output) + filename = os.path.splitext(os.path.basename(args.output))[0] scipy.misc.toimage(output, cmin=0, cmax=255).save('frames/{}-{:03d}-L{}.png'.format(filename, frame, layer[0])) From fef8036cd7ba50eb02f72ba3e587003aef231189 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sat, 14 May 2016 11:46:18 +0200 Subject: [PATCH 23/58] Tidying up the code, comments, docker build. 
--- docker-cpu.df | 4 ++-- docker-gpu.df | 4 ++-- doodle.py | 55 +++++++++++++++++++++------------------------------ 3 files changed, 27 insertions(+), 36 deletions(-) diff --git a/docker-cpu.df b/docker-cpu.df index 9a0d4be..515764e 100644 --- a/docker-cpu.df +++ b/docker-cpu.df @@ -36,8 +36,8 @@ RUN python3 -m pip install -r "requirements.txt" # Copy only required project files COPY doodle.py . -# Get a pre-trained neural network (VGG19) -RUN wget -q "https://github.com/alexjc/neural-doodle/releases/download/v0.0/vgg19_conv.pkl.bz2" +# Get a pre-trained neural network, non-commercial & attribution. (GELU2) +RUN wget -q "https://github.com/alexjc/neural-doodle/releases/download/v0.0/gelu2_conv.pkl" # Set an entrypoint to the main doodle.py script ENTRYPOINT ["python3", "doodle.py", "--device=cpu"] diff --git a/docker-gpu.df b/docker-gpu.df index 6922475..b2edece 100644 --- a/docker-gpu.df +++ b/docker-gpu.df @@ -39,8 +39,8 @@ RUN python3 -m pip -q install -r "requirements.txt" # Copy only required project files COPY doodle.py . -# Get a pre-trained neural network (VGG19) -RUN wget -q "https://github.com/alexjc/neural-doodle/releases/download/v0.0/vgg19_conv.pkl.bz2" +# Get a pre-trained neural network, non-commercial & attribution. 
(GELU2) +RUN wget -q "https://github.com/alexjc/neural-doodle/releases/download/v0.0/gelu2_conv.pkl" # Set an entrypoint to the main doodle.py script ENTRYPOINT ["python3", "doodle.py", "--device=gpu"] diff --git a/doodle.py b/doodle.py index c4c640c..64410d5 100755 --- a/doodle.py +++ b/doodle.py @@ -32,20 +32,20 @@ parser = argparse.ArgumentParser(description='Generate a new image by applying style onto a content image.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) add_arg = parser.add_argument -add_arg('--content', default=None, type=str, help='Subject image path to repaint in new style.') -add_arg('--style', default=None, type=str, help='Texture image path to extract patches from.') -add_arg('--layers', default=['6_1','5_1','4_1'], nargs='+', type=str, help='The layers/scales to process.') -add_arg('--variety', default=[0.2, 0.1, 0.0], nargs='+', type=float, help='Bias selection ofdiverse patches') -add_arg('--balance', default=[1.0], nargs='+', type=float, help='Weight of style relative to content.') -add_arg('--iterations', default=[6,4,2], nargs='+', type=int, help='Number of iterations to run at each resolution.') -add_arg('--shapes', default=[3,3,2], nargs='+', type=int, help='Size of kernels used for patch extraction.') -add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') -add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. features.') -add_arg('--output', default='output.png', type=str, help='Filename or path to save output once done.') -add_arg('--output-size', default=None, type=str, help='Size of the output image, e.g. 
512x512.') -add_arg('--frames', default=False, action='store_true', help='Render intermediate frames, takes more time.') -add_arg('--slices', default=2, type=int, help='Split patches up into this number of batches.') -add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') +add_arg('--content', default=None, type=str, help='Subject image path to repaint in new style.') +add_arg('--style', default=None, type=str, help='Texture image path to extract patches from.') +add_arg('--layers', default=['6_1','5_1','4_1'], nargs='+', type=str, help='The layers/scales to process.') +add_arg('--variety', default=[0.2, 0.1, 0.0], nargs='+', type=float, help='Bias selecting diverse patches') +add_arg('--balance', default=[1.0], nargs='+', type=float, help='Weight of style relative to content.') +add_arg('--iterations', default=[6,4,2], nargs='+', type=int, help='Number of iterations to run in each phase.') +add_arg('--shapes', default=[3,3,2], nargs='+', type=int, help='Size of kernels used for patch extraction.') +add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') +add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. style features.') +add_arg('--output', default='output.png', type=str, help='Filename or path to save output once done.') +add_arg('--output-size', default=None, type=str, help='Size of the output image, e.g. 
512x512.') +add_arg('--frames', default=False, action='store_true', help='Render intermediate frames, takes more time.') +add_arg('--slices', default=2, type=int, help='Split patches up into this number of batches.') +add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') args = parser.parse_args() @@ -70,8 +70,8 @@ def error(message, *lines): print(string.format(ansi.RED_B, ansi.RED, ansi.ENDC)) sys.exit(-1) -def extend(lst): - return itertools.chain(lst, itertools.repeat(lst[-1])) +def extend(lst): return itertools.chain(lst, itertools.repeat(lst[-1])) +def snap(value, grid=2**(int(args.layers[0][0])-1)): return int(grid * math.floor(value / grid)) print("""{}NOTICE: This R&D branch is in progress. Terms and Conditions:{} - Trained models are for non-commercial use, no redistribution. @@ -128,9 +128,8 @@ def setup_model(self, previous=None): for j in range(6): net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad') - def DecvLayer(copy, previous, channels, **params): - # Dynamically injects intermediate pitstop layers in the encoder based on what the user + # Dynamically injects intermediate "pitstop" output layers in the decoder based on what the user # specified as layers. It's rather inelegant... Needs a rework! if copy in args.layers: if len(self.tensor_latent) > 0: @@ -240,17 +239,15 @@ def get_outputs(self, type, layers): return [self.tensor_outputs[type+l] for l in layers] def prepare_image(self, image): - """Given an image loaded from disk, turn it into a representation compatible with the model. - The format is (b,c,y,x) with batch=1 for a single image, channels=3 for RGB, and y,x matching - the resolution. + """Given an image loaded from disk, turn it into a representation compatible with the model. The format is + (b,c,y,x) with batch=1 for a single image, channels=3 for RGB, and y,x matching the resolution. 
""" image = np.swapaxes(np.swapaxes(image, 1, 2), 0, 1)[::-1, :, :] image = image.astype(np.float32) / 127.5 - 1.0 return image[np.newaxis] def finalize_image(self, image, resolution): - """Based on the output of the neural network, convert it into an image format that can be saved - to disk -- shuffling dimensions as appropriate. + """Convert network output into an image format that can be saved to disk, shuffling dimensions as appropriate. """ image = np.swapaxes(np.swapaxes(image[::-1], 0, 1), 1, 2) image = np.clip(image, 0, 255).astype('uint8') @@ -371,7 +368,6 @@ def normalize_components(self, layer, array, norms): def rescale_image(self, img, scale): """Re-implementing skimage.transform.scale without the extra dependency. Saves a lot of space and hassle! """ - def snap(value, grid=2**(int(args.layers[0][0])-1)): return int(grid * math.floor(value / grid)) output = scipy.misc.toimage(img, cmin=0.0, cmax=255) output.thumbnail((snap(output.size[0]*scale), snap(output.size[1]*scale)), PIL.Image.ANTIALIAS) return np.asarray(output) @@ -381,7 +377,6 @@ def prepare_content(self, scale=1.0): """ content_img = self.rescale_image(self.content_img_original, scale) self.content_img = self.model.prepare_image(content_img) - content_map = self.rescale_image(self.content_map_original, scale) self.content_map = content_map.transpose((2, 0, 1))[np.newaxis].astype(np.float32) @@ -399,7 +394,6 @@ def prepare_style(self, scale=1.0): """ style_img = self.rescale_image(self.style_img_original, scale) self.style_img = self.model.prepare_image(style_img) - style_map = self.rescale_image(self.style_map_original, scale) self.style_map = style_map.transpose((2, 0, 1))[np.newaxis].astype(np.float32) @@ -422,7 +416,6 @@ def prepare_optimization(self): """Optimization requires a function to compute the error (aka. loss) which is done in multiple components. Here we compile a function to run on the GPU that returns all components separately. 
""" - # Patch matching calculation that uses only pre-calculated features and a slice of the patches. self.matcher_tensors = {l: lasagne.utils.shared_empty(dim=4) for l in args.layers} self.matcher_history = {l: T.vector() for l in args.layers} @@ -606,12 +599,10 @@ def evaluate(self, Xn): return desired_feature def render(self, frame, layer, features): + """Decode features at a specific layer and save the result to disk for visualization. (Takes 50% more time.) + """ if not args.frames: return - - found = False - for l, compute in zip(args.layers, self.compute_output): - if not found and layer != l: continue - found = True + for l, compute in list(zip(args.layers, self.compute_output))[args.layers.index(layer):]: features = compute(features[:,:self.model.channels[l]], self.content_map) output = self.model.finalize_image(features.reshape(self.content_img.shape[1:]), self.content_img_original.shape) From 57a5e24035ece925651d52e52338c8dbc536dca9 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sat, 14 May 2016 14:22:30 +0200 Subject: [PATCH 24/58] Re-ordered code, removed loss calculation, improved logging. --- doodle.py | 238 +++++++++++++++++++----------------------------------- 1 file changed, 83 insertions(+), 155 deletions(-) diff --git a/doodle.py b/doodle.py index 64410d5..580a99c 100755 --- a/doodle.py +++ b/doodle.py @@ -73,11 +73,8 @@ def error(message, *lines): def extend(lst): return itertools.chain(lst, itertools.repeat(lst[-1])) def snap(value, grid=2**(int(args.layers[0][0])-1)): return int(grid * math.floor(value / grid)) -print("""{}NOTICE: This R&D branch is in progress. Terms and Conditions:{} - - Trained models are for non-commercial use, no redistribution. 
- - For derived/inspired research, please cite this project.{}""".format(ansi.YELLOW_B, ansi.YELLOW, ansi.ENDC)) - -print('{} {}High-quality image synthesis powered by Deep Learning!{}'.format(ansi.CYAN_B, __doc__, ansi.ENDC)) +print("""{} {}High-quality image synthesis powered by Deep Learning!{} + - Code licensed as AGPLv3, models under CC BY-NC-SA.{}""".format(ansi.CYAN_B, __doc__, ansi.CYAN, ansi.ENDC)) # Load the underlying deep learning libraries based on the device specified. If you specify THEANO_FLAGS manually, # the code assumes you know what you are doing and they are not overriden! @@ -268,57 +265,11 @@ def __init__(self): self.start_time = time.time() # Prepare file output and load files specified as input. + if args.frames is not False: + os.makedirs('frames', exist_ok=True) if args.output is not None and os.path.isfile(args.output): os.remove(args.output) - print(ansi.CYAN, end='') - target = args.content or args.output - self.content_img_original, self.content_map_original = self.load_images('content', target) - self.style_img_original, self.style_map_original = self.load_images('style', args.style) - - if self.content_map_original is None and self.content_img_original is None: - print(" - No content files found; result depends on seed only.") - print(ansi.ENDC, end='') - - # Display some useful errors if the user's input can't be undrestood. 
- if self.style_img_original is None: - error("Couldn't find style image as expected.", - " - Try making sure `{}` exists and is a valid image.".format(args.style)) - - if self.content_map_original is not None and self.style_map_original is None: - basename, _ = os.path.splitext(args.style) - error("Expecting a semantic map for the input style image too.", - " - Try creating the file `{}_sem.png` with your annotations.".format(basename)) - - if self.style_map_original is not None and self.content_map_original is None: - basename, _ = os.path.splitext(target) - error("Expecting a semantic map for the input content image too.", - " - Try creating the file `{}_sem.png` with your annotations.".format(basename)) - - if self.content_map_original is None: - if self.content_img_original is None and args.output_size: - shape = tuple([int(i) for i in args.output_size.split('x')]) - else: - if self.content_img_original is None: - shape = self.style_img_original.shape[:2] - else: - shape = self.content_img_original.shape[:2] - - self.content_map_original = np.zeros(shape+(3,)) - args.semantic_weight = 0.0 - - if self.style_map_original is None: - self.style_map_original = np.zeros(self.style_img_original.shape[:2]+(3,)) - args.semantic_weight = 0.0 - - if self.content_img_original is None: - self.content_img_original = np.random.uniform(0, 255, self.content_map_original.shape[:2]+(3,)).astype(np.float32) - args.content_weight = 0.0 - - if self.content_map_original.shape[2] != self.style_map_original.shape[2]: - error("Mismatch in number of channels for style and content semantic map.", - " - Make sure both images are RGB, RGBA, or L.") - # Finalize the parameters based on what we loaded, then create the model. 
args.semantic_weight = math.sqrt(9.0 / args.semantic_weight) if args.semantic_weight else 0.0 self.model = Model() @@ -328,7 +279,14 @@ def __init__(self): # Helper Functions #------------------------------------------------------------------------------------------------------------------ - def load_images(self, name, filename): + def rescale_image(self, img, scale): + """Re-implementing skimage.transform.scale without the extra dependency. Saves a lot of space and hassle! + """ + output = scipy.misc.toimage(img, cmin=0.0, cmax=255) + output.thumbnail((snap(output.size[0]*scale), snap(output.size[1]*scale)), PIL.Image.ANTIALIAS) + return np.asarray(output) + + def load_images(self, name, filename, scale=1.0): """If the image and map files exist, load them. Otherwise they'll be set to default values later. """ basename, _ = os.path.splitext(filename) @@ -336,6 +294,8 @@ def load_images(self, name, filename): img = scipy.ndimage.imread(filename, mode='RGB') if os.path.exists(filename) else None map = scipy.ndimage.imread(mapname) if os.path.exists(mapname) and args.semantic_weight > 0.0 else None + shp = img.shape if img is not None else (map.shape if map is not None else '??') + print('\n{}{} {}x{}{} at scale {:3.1f}'.format(ansi.BLUE_B, name.capitalize(), shp[1], shp[0], ansi.BLUE, 1.0)) if img is not None: print(' - Loading `{}` for {} data.'.format(filename, name)) if map is not None: print(' - Adding `{}` as semantic map.'.format(mapname)) @@ -343,7 +303,7 @@ def load_images(self, name, filename): error("The {} image and its semantic map have different resolutions. Either:".format(name), " - Resize {} to {}, or\n - Resize {} to {}."\ .format(filename, map.shape[1::-1], mapname, img.shape[1::-1])) - return img, map + return [(self.rescale_image(i, scale) if i is not None else None) for i in [img, map]] def compile(self, arguments, function): """Build a Theano function that will run the specified expression on the GPU. 
@@ -353,7 +313,7 @@ def compile(self, arguments, function): def compute_norms(self, backend, layer, array): ni = backend.sqrt(backend.sum(array[:,:self.model.channels[layer]] ** 2.0, axis=(1,), keepdims=True)) ns = backend.sqrt(backend.sum(array[:,self.model.channels[layer]:] ** 2.0, axis=(1,), keepdims=True)) - return [ni] + [ns] + return [ni, ns] def normalize_components(self, layer, array, norms): if args.semantic_weight > 0.0: @@ -365,37 +325,22 @@ def normalize_components(self, layer, array, norms): # Initialization & Setup #------------------------------------------------------------------------------------------------------------------ - def rescale_image(self, img, scale): - """Re-implementing skimage.transform.scale without the extra dependency. Saves a lot of space and hassle! - """ - output = scipy.misc.toimage(img, cmin=0.0, cmax=255) - output.thumbnail((snap(output.size[0]*scale), snap(output.size[1]*scale)), PIL.Image.ANTIALIAS) - return np.asarray(output) - - def prepare_content(self, scale=1.0): - """Called each phase of the optimization, rescale the original content image and its map to use as inputs. 
- """ - content_img = self.rescale_image(self.content_img_original, scale) - self.content_img = self.model.prepare_image(content_img) - content_map = self.rescale_image(self.content_map_original, scale) - self.content_map = content_map.transpose((2, 0, 1))[np.newaxis].astype(np.float32) - - # Feed-forward calculation only, returns the result of the convolution post-activation - self.compute_features = self.compile([self.model.tensor_img, self.model.tensor_map], - self.model.get_outputs('sem', args.layers)) - - self.content_features = self.compute_features(self.content_img, self.content_map) - for layer, current in zip(args.layers, self.content_features): - print(' - Layer {} as {} array in {:,}kb.'.format(layer, current.shape[1:], current.size//1000)) - def prepare_style(self, scale=1.0): """Called each phase of the optimization, process the style image according to the scale, then run it through the model to extract intermediate outputs (e.g. sem4_1) and turn them into patches. """ - style_img = self.rescale_image(self.style_img_original, scale) - self.style_img = self.model.prepare_image(style_img) - style_map = self.rescale_image(self.style_map_original, scale) - self.style_map = style_map.transpose((2, 0, 1))[np.newaxis].astype(np.float32) + style_img_original, style_map_original = self.load_images('style', args.style, scale) + + if style_map_original is None: + style_map_original = np.zeros(style_img_original.shape[:2]+(2,)) - 1.0 + args.semantic_weight = 0.0 + + if style_img_original is None: + error("Couldn't find style image as expected.", + " - Try making sure `{}` exists and is a valid image.".format(args.style)) + + self.style_img = self.model.prepare_image(style_img_original) + self.style_map = style_map_original.transpose((2, 0, 1))[np.newaxis].astype(np.float32) # Compile a function to run on the GPU to extract patches for all layers at once. 
layer_patches = self.do_extract_patches(args.layers, self.model.get_outputs('sem', args.layers), extend(args.shapes)) @@ -405,16 +350,61 @@ def prepare_style(self, scale=1.0): # Store all the style patches layer by layer, resized to match slice size and cast to 16-bit for size. self.style_data = {} for layer, *data in zip(args.layers, result[0::3], result[1::3], result[2::3]): - patches = data[0] - l = self.model.network['nn'+layer] + patches, l = data[0], self.model.network['nn'+layer] l.num_filters = patches.shape[0] // args.slices self.style_data[layer] = [d[:l.num_filters*args.slices].astype(np.float16) for d in data]\ + [np.zeros((patches.shape[0],), dtype=np.float16)] print(' - Layer {} as {} patches {} in {:,}kb.'.format(layer, patches.shape[:2], patches.shape[2:], patches.size//1000)) - def prepare_optimization(self): - """Optimization requires a function to compute the error (aka. loss) which is done in multiple components. - Here we compile a function to run on the GPU that returns all components separately. + def prepare_content(self, scale=1.0): + """Called each phase of the optimization, rescale the original content image and its map to use as inputs. 
+ """ + content_img_original, content_map_original = self.load_images('content', args.content or args.output, scale) + + if content_map_original is not None and self.style_map is None: + basename, _ = os.path.splitext(args.style) + error("Expecting a semantic map for the input style image too.", + " - Try creating the file `{}_sem.png` with your annotations.".format(basename)) + + if self.style_map.max() >= 0.0 and content_map_original is None: + basename, _ = 'poo', 'face' # os.path.splitext(target) + error("Expecting a semantic map for the input content image too.", + " - Try creating the file `{}_sem.png` with your annotations.".format(basename)) + + if content_map_original is None: + if content_img_original is None and args.output_size: + shape = tuple([int(i) for i in args.output_size.split('x')]) + else: + if content_img_original is None: + shape = self.style_img_original.shape[:2] + else: + shape = content_img_original.shape[:2] + + content_map_original = np.zeros(shape+(2,)) + args.semantic_weight = 0.0 + + if content_img_original is None: + print(" - No content image found; seed was set to random noise.") + content_img_original = np.random.uniform(0, 255, content_map_original.shape[:2]+(3,)).astype(np.float32) + + if content_map_original.shape[2] != self.style_map.shape[1]: + error("Mismatch in number of channels for style and content semantic map.", + " - Make sure both images are RGB, RGBA, or L.") + + self.content_img = self.model.prepare_image(content_img_original) + self.content_map = content_map_original.transpose((2, 0, 1))[np.newaxis].astype(np.float32) + self.content_shape = content_img_original.shape + + # Feed-forward calculation only, returns the result of the convolution post-activation + self.compute_features = self.compile([self.model.tensor_img, self.model.tensor_map], + self.model.get_outputs('sem', args.layers)) + + self.content_features = self.compute_features(self.content_img, self.content_map) + for layer, current in zip(args.layers, 
self.content_features): + print(' - Layer {} as {} array in {:,}kb.'.format(layer, current.shape[1:], current.size//1000)) + + def prepare_generation(self): + """Layerwise synthesis images requires two sets of Theano functions to be compiled. """ # Patch matching calculation that uses only pre-calculated features and a slice of the patches. self.matcher_tensors = {l: lasagne.utils.shared_empty(dim=4) for l in args.layers} @@ -467,57 +457,6 @@ def do_match_patches(self, layer): return [scores.argmax(axis=0), scores.max(axis=0), dist.max(axis=1)] - #------------------------------------------------------------------------------------------------------------------ - # Error/Loss Functions - #------------------------------------------------------------------------------------------------------------------ - - def content_loss(self): - """Return a list of Theano expressions for the error function, measuring how different the current image is - from the reference content that was loaded. - """ - content_loss = [] - if args.content_weight == 0.0: - return content_loss - - # First extract all the features we need from the model, these results after convolution. - extractor = theano.function([self.model.tensor_img], self.model.get_outputs('enc', args.layers)) - result = extractor(self.content_img) - - # Build a list of loss components that compute the mean squared error by comparing current result to desired. - for l, ref in zip(args.layers, result): - layer = self.model.tensor_outputs['enc'+l] - loss = T.mean((layer - ref) ** 2.0) - content_loss.append(('content', l, args.content_weight * loss)) - print(' - Content layer conv{}: {} features in {:,}kb.'.format(l, ref.shape[1], ref.size//1000)) - return content_loss - - def style_loss(self): - """Returns a list of loss components as Theano expressions. Finds the best style patch for each patch in the - current image using normalized cross-correlation, then computes the mean squared error for all patches. 
- """ - style_loss = [] - if args.style_weight == 0.0: - return style_loss - - # Extract the patches from the current image, as well as their magnitude. - result = self.do_extract_patches(args.layers, self.model.get_outputs('enc', args.layers), args.shapes) - - # Multiple style layers are optimized separately, usually conv3_1 and conv4_1 — semantic data not used here. - for l, matches, patches in zip(args.layers, self.tensor_matches, result[0::3]): - # Compute the mean squared error between the current patch and the best matching style patch. - # Ignore the last channels (from semantic map) so errors returned are indicative of image only. - loss = T.mean((patches - matches[:,:self.model.channels[l]]) ** 2.0) - style_loss.append(('style', l, args.style_weight * loss)) - return style_loss - - def total_variation_loss(self): - """Return a loss component as Theano expression for the smoothness prior on the result image. - """ - x = self.model.tensor_img - loss = (((x[:,:,:-1,:-1] - x[:,:,1:,:-1])**2 + (x[:,:,:-1,:-1] - x[:,:,:-1,1:])**2)**1.25).mean() - return [('smooth', 'img', args.smoothness * loss)] - - #------------------------------------------------------------------------------------------------------------------ # Optimization Loop #------------------------------------------------------------------------------------------------------------------ @@ -605,32 +544,21 @@ def render(self, frame, layer, features): for l, compute in list(zip(args.layers, self.compute_output))[args.layers.index(layer):]: features = compute(features[:,:self.model.channels[l]], self.content_map) - output = self.model.finalize_image(features.reshape(self.content_img.shape[1:]), self.content_img_original.shape) + output = self.model.finalize_image(features.reshape(self.content_img.shape[1:]), self.content_shape) filename = os.path.splitext(os.path.basename(args.output))[0] scipy.misc.toimage(output, cmin=0, cmax=255).save('frames/{}-{:03d}-L{}.png'.format(filename, frame, layer[0])) - def 
run(self): """The main entry point for the application, runs through multiple phases at increasing resolutions. """ - # Precompute all features from the content image based on layers specified. - shape = self.content_img_original.shape - print('\n{}Content {}x{}{} at scale {:3.1f}'.format(ansi.BLUE_B, shape[1], shape[0], ansi.BLUE, 1.0)) - self.model.setup(layers=['sem'+l for l in args.layers]) - self.prepare_content() - - # Extract style patches from the texture image as specified by the user. - shape = self.style_img_original.shape - print('\n{}Style {}x{}{} at scale {:3.1f}'.format(ansi.BLUE_B, shape[1], shape[0], ansi.BLUE, 1.0)) - self.prepare_style() - # Now setup the model with the new data, ready for the optimization loop. self.model.setup(layers=['sem'+l for l in args.layers] + ['out'+l for l in args.layers]) - self.prepare_optimization() - print('{}'.format(ansi.ENDC), end='') + self.prepare_style() + self.prepare_content() + self.prepare_generation() Xn = self.evaluate((self.content_img[0] + 1.0) * 127.5) - output = self.model.finalize_image(Xn.reshape(self.content_img.shape[1:]), self.content_img_original.shape) + output = self.model.finalize_image(Xn.reshape(self.content_img.shape[1:]), self.content_shape) scipy.misc.toimage(output, cmin=0, cmax=255).save(args.output) print('\n{}Optimization finished in {:3.1f}s!{}\n'.format(ansi.CYAN, time.time()-self.start_time, ansi.ENDC)) From a9377f939cc88adc4f2a9366bf05668fbba9247f Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sun, 15 May 2016 11:04:19 +0200 Subject: [PATCH 25/58] Updating Lasagne to it includes Deconv2D, adding sklearn to requirements. 
--- requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5d8d2d2..98b13e1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ colorama pillow>=3.2.0 Theano>=0.8.1 -git+https://github.com/Lasagne/Lasagne.git@0440814#egg=Lasagne==0.2-dev +git+https://github.com/Lasagne/Lasagne.git@31ac7d2#egg=Lasagne==0.2-dev +sklearn>=0.17.1 From c0f9c6931044f24d7fd2b422a0652e3669950563 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sun, 15 May 2016 13:40:44 +0200 Subject: [PATCH 26/58] Fix for image resize by using PIL fit() rather than thumbnail. --- doodle.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/doodle.py b/doodle.py index 580a99c..b45cdf9 100755 --- a/doodle.py +++ b/doodle.py @@ -84,7 +84,7 @@ def snap(value, grid=2**(int(args.layers[0][0])-1)): return int(grid * math.floo # Scientific & Imaging Libraries import numpy as np import scipy.optimize, scipy.ndimage, scipy.misc -import PIL +import PIL.ImageOps from sklearn.feature_extraction.image import reconstruct_from_patches_2d # Numeric Computing (GPU) @@ -283,8 +283,7 @@ def rescale_image(self, img, scale): """Re-implementing skimage.transform.scale without the extra dependency. Saves a lot of space and hassle! """ output = scipy.misc.toimage(img, cmin=0.0, cmax=255) - output.thumbnail((snap(output.size[0]*scale), snap(output.size[1]*scale)), PIL.Image.ANTIALIAS) - return np.asarray(output) + return np.asarray(PIL.ImageOps.fit(output, [snap(dim*scale) for dim in output.size], PIL.Image.ANTIALIAS)) def load_images(self, name, filename, scale=1.0): """If the image and map files exist, load them. Otherwise they'll be set to default values later. From 5b7c28ff90a35c40cf69848cacbdc0a9ca24d4f7 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Mon, 16 May 2016 10:30:00 +0200 Subject: [PATCH 27/58] Updating examples in the README after testing. 
--- README.rst | 22 +++++++++------------- doodle.py | 10 +++++----- samples/Monet_sem.png | Bin 14679 -> 14186 bytes 3 files changed, 14 insertions(+), 18 deletions(-) diff --git a/README.rst b/README.rst index ab66426..0261457 100644 --- a/README.rst +++ b/README.rst @@ -41,11 +41,11 @@ The algorithm is built for style transfer, but can also generate image analogies # Synthesize a coastline as if painted by Monet. This uses "*_sem.png" masks for both images. python3 doodle.py --style samples/Monet.jpg --output samples/Coastline.png \ - --device=cpu --iterations=40 + --variety 0.0 0.2 --layers 4_1 3_1 --iterations 4 6 # Generate a scene around a lake in the style of a Renoir painting. python3 doodle.py --style samples/Renoir.jpg --output samples/Landscape.png \ - --device=gpu0 --iterations=80 + --variety 0.5 --layers 6_1 5_1 4_1 3_1 --iterations 4 Notice the Renoir results look a little better than the Monet. Some rotational variations of the source image could improve the quality of the arch outline in particular. @@ -58,12 +58,12 @@ If you want to transfer the style given a source style with annotations, and a t .. code:: bash # Synthesize a portrait of Seth Johnson like a Gogh portrait. This uses "*_sem.png" masks for both images. - python3 doodle.py --style samples/Gogh.jpg --content samples/Seth.png \ - --output SethAsGogh.png --device=cpu --phases=4 --iterations=40 + python3 doodle.py --content samples/Seth.jpg --style samples/Gogh.jpg \ + --variety 0.2 0.1 --balance 0.85 1.0 --layers 4_1 3_1 --iterations 6 # Generate what a photo of Vincent van Gogh would look like, using Seth's portrait as reference. 
- python3 doodle.py --style samples/Seth.jpg --content samples/Gogh.png \ - --output GoghAsSeth.png --device=gpu0 --phases=4 --iterations=80 + python3 doodle.py --content samples/Gogh.jpg --style samples/Seth.jpg \ + --variety 0.0 --balance 0.7 0.8 --layers 4_1 3_1 --iterations 4 To perform regular style transfer without semantic annotations, simply delete or rename the files with the semantic maps. The photo is originally by `Seth Johnson `_, and the concept for this style transfer by `Kyle McDonald `_. @@ -77,13 +77,9 @@ For synthesizing bitmap textures, you only need an input style without anotation .. code:: bash - # First synthesis uses a darker noise pattern as seed. - python3 doodle.py --style samples/Wall.jpg --output Wall.png\ - --seed=noise --seed-range=0:128 --iterations=50 --phases=3 - - # Second synthesis uses a lighter noise pattern as seed. - python3 doodle.py --style samples/Wall.jpg --output Wall.png\ - --seed=noise --seed-range=192:255 --iterations=50 --phases=3 + # Generate an image of stones based on the input photograph only. + python3 doodle.py --style samples/Stones.jpg --output Stones.png \ + --layers 5_1 4_1 3_1 --iterations 6 4 4 --variety 0.4 0.2 0.1 You can also control the output resolution using ``--output-size=512x512`` parameter—which also depends on the memory you have available. By default the size will be the same as the style image. 
diff --git a/doodle.py b/doodle.py index b45cdf9..08de7c1 100755 --- a/doodle.py +++ b/doodle.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -""" _ _ _ _ +""" _ _ _ _ _ __ ___ _ _ _ __ __ _| | __| | ___ ___ __| | | ___ | '_ \ / _ \ | | | '__/ _` | | / _` |/ _ \ / _ \ / _` | |/ _ \ | | | | __/ |_| | | | (_| | | | (_| | (_) | (_) | (_| | | __/ @@ -34,11 +34,11 @@ add_arg = parser.add_argument add_arg('--content', default=None, type=str, help='Subject image path to repaint in new style.') add_arg('--style', default=None, type=str, help='Texture image path to extract patches from.') -add_arg('--layers', default=['6_1','5_1','4_1'], nargs='+', type=str, help='The layers/scales to process.') +add_arg('--layers', default=['5_1','4_1','3_1'], nargs='+', type=str, help='The layers/scales to process.') add_arg('--variety', default=[0.2, 0.1, 0.0], nargs='+', type=float, help='Bias selecting diverse patches') add_arg('--balance', default=[1.0], nargs='+', type=float, help='Weight of style relative to content.') add_arg('--iterations', default=[6,4,2], nargs='+', type=int, help='Number of iterations to run in each phase.') -add_arg('--shapes', default=[3,3,2], nargs='+', type=int, help='Size of kernels used for patch extraction.') +add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. 
style features.') add_arg('--output', default='output.png', type=str, help='Filename or path to save output once done.') @@ -73,7 +73,7 @@ def error(message, *lines): def extend(lst): return itertools.chain(lst, itertools.repeat(lst[-1])) def snap(value, grid=2**(int(args.layers[0][0])-1)): return int(grid * math.floor(value / grid)) -print("""{} {}High-quality image synthesis powered by Deep Learning!{} +print("""{} {}High-quality image synthesis powered by Deep Learning!{} - Code licensed as AGPLv3, models under CC BY-NC-SA.{}""".format(ansi.CYAN_B, __doc__, ansi.CYAN, ansi.ENDC)) # Load the underlying deep learning libraries based on the device specified. If you specify THEANO_FLAGS manually, @@ -375,7 +375,7 @@ def prepare_content(self, scale=1.0): shape = tuple([int(i) for i in args.output_size.split('x')]) else: if content_img_original is None: - shape = self.style_img_original.shape[:2] + shape = self.style_img.shape[2:] else: shape = content_img_original.shape[:2] diff --git a/samples/Monet_sem.png b/samples/Monet_sem.png index 0320a46f5f19a5e97e291b5ac2c13382772176a4..43940e9614acc2b92cdd3a09abb16739e7efa5aa 100644 GIT binary patch literal 14186 zcmZX*by!s07e9IikZzFf7DQ0GQ(C$dX%M6Zq+x~*DQQ7kx~03M8%gQz?wosgzxUqX zeV+Tz%8bDrJLeuTv zL8_;h=0^1ubVTV*EUj5g=toBgzN&<85>m91Bzs1+M1@vSUdd&yx1Q%etLm!i_7+Ao z?Qp7fbsNt%)9N&X{m)TTEj7>LC99ZX#pO)!?IclaadDk~;J)GN8}xxE;l@M{FwAm! 
zjx4qw1xIz@e!48SIJ(fY78$9eqLs%-r42xe)gfBCe!e6tX|7|w+=eSDSML6Gkv(fJ zyQU}$0Gp%k>*QB$ds=f^XbCV>S_VR4H$E%Y9}ry0kjGww!Wy=xULnevPkrTdEqkP* zjD3&;K4>F;!G_^VN)FhN7gr1dsI=11d?v(<|I^ppy+9V z08M@e_(4d8`hSCd>2kUklD!IGq>)5Lqm2ouCf9OgCj_1vl5MkvF(tr%jTg_;AKnOZ z{5QsRVtSJ1jr#LhRAyf^T84@IY)V|NrxEOWVYuXZK^XthYWZ4Lf-;8~9iR;ez?JtC zP+*+y5cscG0RiV{^rr!1)P;S(e?)5hM<9d-r1Jm$$a+U-i6ALAvjQ_kaY4H7Ox=Dd zi3gy?z%nj8Rzv~G5X{~UJMX^ z@Vo0rRa$q$D?wz^j$fV} z`!wHhD@_ylm}xTzDMKniefz2o`CjXlBYU|SjZ~EUtoDvY>k6P0qT4&+l7x;+Hk{Xr zAJ|@zXl0(fgy2aA^#hgx%XRv~k73o8-y*=31GZl_;+NNHt$04|MWR=ViVL<}A<{@d zwK1d!G6d7_8Idv^8Ampm-0n#UwlxqP1;MVkS+d&DW=ef@W(cTRy)@2Qu}*-5`PDm0 zON|*sCsGz#GfoE7*H#cvIE6B8HR@ikSwLm!eZJ2gD8BDR5cC7NW;zL@rYl}FiDu}> z>>1lv?J`*c%$Ks7y1tDrDn`iSsA|t-4eu5vqo61<^n+^KmK;A^2M*&KeNNf8VAO2? zO!OK?Y|29XFG}QDPr%~ z9Ac07ov2GA5q1$lvV3PwvzDqcIqhMo1MZE1!tH{wm?Q3BK{(4JO=u0r!g8AJHC3ex zv>W+tqn{&asN=y+NBuW;OglmJxxa&%h<|M;7la;oyQ`;8!c(>@nHE0W_EAczH!ySm zw>&z^;F~)JTH`P8oo}+Ke=1s^wZ@!o&74iU07*@rj!n~U)uu7n?CcCx@8i;`jnP8D z1v>A(9I<=nLDt&?o2aWl%uC{r_oosxW;dIkbfdU-bH8tDy&3+Y&*v;KKJQUk6(@)h z+XPA=Bsqy7io{w@Y>a-HDDvmR7ja1`P9{)R&ylr;dbQn;D3slN<~(AXzaifQiNzy7HHi1Fng?`0{dm2pdS z?fAH~Ncj#%b4;526ER_^6Cc;);hm&d_KdNh3B`l%Cxi@1oM(|579H*KhaqQBLC9p> zOdx-^Mf$~5-nF?Z=&i(u-~1Fqi3(#f3Pe)6)GkwMSqX8uM8EDTHnRTck;=L$pbP3W z5Q9W}d|GyV&sEsS8%5&amO!89BwK^gcDb(%FU)i(Abf7~Z2-z@ki4oT|3L~Q|6^7S zI$PyyM##!4(fz+=BKN>8TV>NldiGsS1lpBu#9a7C5uWU^jKVM?P&(scI7zntJiPa% z`r<#iHcJlF5UO*YsrSEBx-1;B1UV{44dHjhy_$asS6;?tlSqM3eb;eYTx}5^BHbX8 zLhFR-Akp*{Qcqqn7CP~!Ac&)8`#=Dm_!*s`Jndl9YcVQgVD$6l^9>b*S^KL2K;9=)@p;n>nb8bTvo_E7|{MxMKK#?g=G*%s7A zwwC+G#451F*cgN1V=MbfQT1k1ZGhQD;J~5zArg~xu&5FOV9LP$Iyav*X`lT*uw-X* z;wEAhV}uESwOL^Q*=jy|8_p5#p#uteEE9)7j+pU+{^f`kb@$`p zo{m`M83Ly9rW)wGq!R-kCx}ZN+m9dVT!%Az9c(@jx3ADkyV}elap`#V|B+sb@n89% zWD5F&PH2_}KZOQ4I7)#lq!i(f7Hl$#i5|YybNo_Z<Nt=A9C543Tny#iQI79@_7B}Puem(S}mOp371oGpO6@+y{{uNf->qU`c z9JpQg=vwB{_C>q5QKW=~=1`*YqZJq4Q_S(MDAs0|dW0Vty~)nSzoV#PBn?AkjtrHX zf}%bX|Js+-*1&A^CaO}$Q|I#w9vEiV1=pvJwM>gW^t3_W;g0 
zWVS6|G%i4^oi~D^?sX6VYt9MzJxT^nmNipsv14(mPUepkpBrxDTyKejkN}$?NLiVB zR^F|@c+WHJsu%q>J4WK>xSa=0tr!;{M&3Sw|JYhi%3+gWyAWB46|85e^M-#OP3cuT zel&W1QTMH+fR3%q+sxb;7e;?ojXp$Qv}fY)_W!*)8@GuieLjyNgS=K>{n1*S1+X&d zN_F_!f1gG^wVFKBpU9ggG$)5hAjbSbv^(wUGm0ccRSUUX>e2j;iEGs+*5IL}DlY(; z$>-BdX~!yK6(T5z(D7B8=9BX%v9#M0_sam9#1%nPa2Os>e0$0CR@rI`R1)N6$qH1J zc_U$KygL9F9rk!!S7C)UB<_*#;NVUIP7)F{kurLKJ=NIwQJ5hH*#3cn?$QkEc5l9% z$SF3RUk!&CFScHorI`o3laeyYt~;oj8~JH*R4$rl2knW&JgL7TzEWOKYp=wYRC2{| z@_g58>lMF}B48P)kiHxIrmTdjU{ADire*L|>J=adOe5@klaj?5alfJzejv~_#Uobn z$NdBpnr>R`U!OE&V3pApoyT8^I^i_84zK#4wT(>?+1!%i_|Sr?x`u7ybXjMbXYn=+ zy5K41r}LSPRSNawfZBWOl4Fx@T-rL?*a%JT#*%%ehmR?JCgr8H{^ew>u>R3|)6L-| zfh#`y@#7CI*#WvbdhMM{plVEt70F_+6im`yXSk;xKK$6hXl$_EV*2TEz$cQfTtxz> zmzWt&&t?QUxiFz+YVHzJScQi-P-OL~VLp^PK1Hnf=6E*=M^vyAZxnx{=fiiCZX5F4 zJoN=o@)vRY+Gde9Lh_woz88`L!q2IPn;- zai4eXO0UG>8C`Z@a0gpIqHAW2!p*?;VWI9#S6~m$O6^BVLOVJ9fHoWQ>ATp92?R@o zuCzJZ6jhy)NMTOLl}LD3rXVjiy6h1LBYNpI10n2Kp&4mPB(ttCUuHw8Oy-eaBpc4>Mi6o3^I@O1<&0h}2Yp8hscgdkb(q)lI#TUbe**LOk4 z5*h6Ndma^eMNBLK0hee()A1MBwKqZ_3`21ulEK)JR0xiX4t!6^(_ZVPwSpDmA>0%_ zjY75fo!GIay`c5p^vfOsDivMX;xRk`C&?%ilsOE^mnL0JsarV^vCJm*i!C>k?q|$5 zK5kSn$2g+Oa7CG(vyqHk{F*-D>p)IK8i|a#h2!N|ZhwUrFLJS-hGV>V!}@jLO0_a7 zSCx_{o|DU*fe^4=b}4Wu)pB;#f!?q|OT{`))M8&M0>>X|t%IaDw^C&#S^M_+b+x94 zYo{!$9;*4>cex{@JotrO`}emkUyU#h0#*2<3$+Lpyw435yso+qn=dFBwN{#LTs)wX zkkNoEVgxkF=DQLvIRWi{U-jqTYFcIJanZ%EHFP2fF>;HpDRl1gT!ruDvEvk2Ud&qUM1`W-PxIEK)DOI^-p z=p8sWt=<6wr)@XFfCcZDj5yhl03_eg*HvI_wG0e&d0AC-i_XTj59hnn6$)1PTw|ws z;e!XN1Q{VLWeCwSrh8(5i{C19l>>S|RIEQ|pYM>dM&p9O^>4S5k4OV0kYgO_lF}b z@Ug|Divz5|rgo=uhQ zFn?k|C2i9oiyVK-DbywwZrnK`oFkt-sAJ4h{!Km}>VXH9U&{QRXHBkR_kcNk)vcd` zcHWiT=Hw|bS#mVQ9I@ufT?X5kwQD$^_zsM*J{;1S743XE!<*oHTU7S;GPRwSd z&x6-fqF5(=z%yTA%~*B=w_E;(d0*R!`du;wn;?e*vGQ;-&sIlHPzGL;{QBnOTd zSRzPxD3lFKa=207un=)mz$rTvTDOYoSeA&$hQkw|amBm~W6YolfEzc*iwTnXyTd#C z{tDE*nWqH8+}gmxf@M5C%E^S)5Mvl#Wz@8jO;6N}&IQg|DA63A$XLP5@bkHU3qzE40P;=$p(xF(e-zZ# zq*3?|*y5rJ->RG1d*&AEv%~y_r^FS{uMMAZw9N(uL&K$p_~)}W`dBXl?qxayl}F|x 
zZ)4vnTLcRsghWrSkO3CKjhPkIt411cZRzQH)b-aXU^d)8Pjam;8gIO0r)^tqBS)O# zdO%0}96RvOoYQS34l}VqT1Ua}ZgUu^xaxgN<-+8uGf(YTUe`J-*bAg|wCngy$Irc1 z5TML{y$!}lGh4?*_9ga-O>++Uf%L+!z+|}Q9lBR(50*vBnmzpYh~6?(0zo?LS!|x8 zl3AxEOyXKmq=eGWe2=w7vGo&kSI%GvwNG~vBMqrN!od`DP_3VzV~9q+p8}f}spXEJ z?A4_Q6gnDQO)kK+#yGAEZP>W}Q4{-axOO1wph;obMTU&=_POV@9hJ!Jn~WM$ z^h6HG$L;fd=Ly4C=vIbJ7dz5k-3|VYPjv4Xr zptZaY*nBm~RR-j*KgK|m5e9-+?}Yt_Zk#OZdcRQdpdtL#klM8kY}0Aw>A=ITv?UX+ zGoesxh2rH^N*Sh()MILj5}9|-=5BECzs}v9_!4=xU1$F`zgZ0C^p2i-xl`#7pVIqs zZ}*^416VipLAEo<+~C+g%W1y!j67qRQ4gRGCd{uYUf@3sZqTQoCixSn9Aye8jkUvS z^AuOEY0aE1o7%Y9JAwxtm2A>PA;62}Lr&@{XsiPO!x+^%i7o%NeFvRGu}TX#sfbE| zI{!!>u{-)5XOD^W1w7SjV|7?USyR zP5E~@_WlroTwm}S^r=#m=yG;C8J(J0VvrE-!x91S%)e4)vJh0|(b@}nY0EEc;(Bbu z*L5~t&N1$L9jaS?RJFaHg@;DA8TdJQZE#RuYbh@%Q>hkH9u74Q5u*jrX0!xMClyGl ztBYZ&%iZGJ#Q#u4mGr`n`?XUoVXX!J*okh)2zy43%#Z=DFJpluwrJwR>XG~T(T94p zlF)sV<+Fycmn!NE$6}MK_PgT9fs8Z_1AuPAz^%%%X7jn?munaT5!-y5@$TNk)znYB zjH^5#APzkkw26`%QNBoIWtkJbOx{G?q;%gKQi|M`#q{S;bQnHD#>7}zyQu=g2o72t z``i2??2F)G#hw9ZyQI(%4uGsCaijuKMSl0H%%!pXWuhrLZYD>W(A;q+0$EEU2BjTy zv)D75^CmkN71_Kz8#w7L4>}BC&_+%sK*!&3 z(5$=VjWQ?pNRP7G!#m&I{%D_Fm9%aiC*9kli$`qltIJG)r7tB11vhhSjsyC^2;=$+0V2?2A8_B`yPSh{zDxBWj60h&Kc zpq&QwrM>6+wByC{%DGPO{XHm;j*3fO;U9Tr8^mBIRlWMtUWu+UlciNg$*qfYInx(+ zBB}u4%DHUXkEJ^@n-dR35B_tO7eVHJt2z}lYci=(ohJlCOp6hOz*`Dqvv9PHqn$&~ zp6&|zD%KCp=&!?KTdP8#hlyz+2pNNCI@NY$Ou84$UNMVB;ym$%{5vZj3C=&lSLQjM ziAyX#^zW*)w$G^qho%R3oA;+*Fd_MzRtay^9ShgRIkgf38y|t=zL=qqU+m|y`aEeL zO76{jXC7&VFrUGm9_P4e#L<62Sj`(J^jNQ#)H)xYaJoauHnzWbG5pX^G>ZO56+pm& zAGu{JB=ZJ-Gd>SdD1;l6)eLMr%${m!XEDq#ibGJty<6`%?q$<5&ZcuNi?VKyET*wN zpmZ2^%NLsH;sAHG5V3GI*}PYo#tBDf1M`IAYqrA)V{DImO^jwJ=W#g_A?o5)6pg|a zS1VPm@*WPxfz?Spc#*?82LWakSJ3=7xIbP{Yq9|Js?A%?og9yZGL?&1D~`168c~K` zb4=0&rijvdMK(eJgFL&T6%zc9798ZRyiN5>v5yi$+fK~KrT%SaVj2TTu<&3gnlDe$ z#A$O-yQSr3A!z%4N&@QPa+eZ+8Arq_@10IKyDZP<0;QlwYs|@{j~YPSuMSJ!X+@#! 
z3Pj(&a5Qakp4d_W!4&^cZmLfg=?EDw(OU0RNFqFH{lY0xI}`!Uu2q%o{(CUFsz3OK zkinuC7Q<2V7t_&&29-DsicCILxa}dF2A#QJ1Y{7K37(0+7rk?@6a0)U;zGmuLn{Fy zS4(O(Bt1F$&x3!2A$jh4TMdX?>xAF1+x3aZy*&!TMZoy&CdLw8iM8`>-K!H{3jctMzW~r38?abB6J55yh}=#HUkrNn`e6P_ z-V>8S)Pbr4gxElY)|_$8C_ZeyE0|MKq&do2E07D-gApCp0nzE07Db`wqc!LUoOPf1 zCI8EDchtL~_#es-{E-xjEsyS40QE5I)wwX~qrVsb9N~!U#y&!cUEN(G zg1DwkXyh{^13^%yJEk+z(SYpClGJZd7JxNl9RD3Xu9oyt!w=ZXt_nJgH5xWp%vdFb z^NGuf_2TY=x=&*|2zak*Y^tUWZp8}i4%okDI~euv3->l}Si~!7Ch?L;_}4;M1gZ)T z9(Ip;K;*rapg!8aSbQtx`-y}p5~Tkgi5w5G01vUi!tWf^iZ2QV;?ey)R_e|*KG|Sd zs=-RQI{8zbl4hJw9%N7r-2#~XAci_fGjj`iC?*A$PBKNVb0@#Ow9y^cC|nBoG98kN zPacquEWgUTEMfDzOex*)VS$e=t~kMMK6wQAIjr13iw+B0#pBoeI_b?g%3x$)fZck< zZ0Py6??%0r{PU0GOYvg3DlkEGP+AxJi9}8hgUEdE*b};5A|9+a{Vl^UY^?1T2P_0W z)sG@er<*bR|3%!+qjS#I9qV?!_5muR0;Ml_z{I+1oD5& z-9qiMmw$Y3BWFZ+M*iA(GHOc3TU@!Yv44JilLh1lJ%z?xQe>moWGjw4|B%kOj*p|H zM7Vw(Fi{kPwxSt4>`8^$z8>VUf8V%R5}go=k*OsS`@XZeuKq?5tNale01v1gkl6aI zRx^N-__)(6Idk(&aeT2Lhte#wYscgoq$dKfnVTSR`p&jOMgoYRdj@|-%3{oK2hP2> zwP@<}t01uVm0M$|;Bm3yPYeEY>||s9b0?;Q;{YFx(pG+XupXql@17n{jgO0QkZFMV zVTJ#=A3A>A-iD%^Jyan7r835Lfe=_wzx8<@!HLl5>Odw=3IR$->eNhbpCe7T9ybIG z)`z|bt@uk&Jk8Lx^OukiFgx;zmm%?uF(INOTr1&wHmM`(q2J2$57RK}cdk<9TRkOz zg|~dme@l6sThT4|WTbnJ3rHaH>PFNrPbycHlr&3x@v6Gn-Qg{obhMY3y9NMa#wRSms`Eg_-+#hox}=a9y)RxXeuNwLJLo99 z!l5KZK9oW7}P z)BN`ztgUE}44~fs!by9!hA`SKR&$FfCnxDJ3-=tU<(cI3ZH0Y%<(d}`w&oCZhxRY@ zAHTj(Ebo?n&kmt{zl7xmd*4^HgEpnNUujn{l5F|9pwe$W9aT*nm=a0Zj;F)G&?+x? 
zFeFuQ*%M@WLsDul%6A&o)orY(7X48SR&D(~4I2=pR!&nAa1h5wu%5m|+W9T}kdsqB zck-AvKRB4CqQ-MmWHtyo{=&izr+Tri0@>^@3F94A>&8rIw}KCP#3Mh`j8fxmq!Nks z7X!~??S7`@7e=}+cr!^#hY*t#8$@JfjRwR+j+Ym1MRr{@%{aQGqOiWc32pUQb6AEq zhfXft8Lh^VBXGPIJ3)Hfn#{co3V`r+PSjCu{c~je+|!WfBvygMx4@u7`ms<<_p8QK zI$5g7k96GY31-)(Y9YQ+$I+U1A}QTcK~FV&nVf>=C3+4VqJfQ3UUu{9fzz%;gZU=w zk+PgUrVb(bcF$cfYYys;CYoyW)gv0A_>II(CnL2a?{{tK!dlx3!LuKTm{;@y&dhwoV0wFpTt0!lJ0mr9cXS(&B>Nu6t>V+_b6J3zw(d#BC zq0bOGIt8P#IUO1rCTTzgpvo1zSe-ekHTcIml$M?n%k|Tg^;J+O#2?baM)fnag7{U{ zyZw6O{Sv1#_pNxb#k-<(VrsFapX=u3rwi7XazYPA63Y~mB z{gDMMa-Ad41UL^-oCF`ZyUYDNACZN+on>f$I-A;{SpC66ReP$B%&M@^5t7uTELNA z^6tvMuC`_7sIS68%l6T|JA&`B33`O`;4_EQdR}tjd1*A3H6YgH$(O)Rp3ee>qu{T( zzAWE*WiWq4_uvEPs@0lL@LVBZY8f+212TRgy^!yMdQDc-ym6V;ik~S@@Pr{^8w{iM zy!~qMK@IQYD`yvj(pQ&3opZIFD)sFt+!V5S7oap0(kiKybYTn;T< zpd@~EHSG%X>GEB=@4l%nd;h?Q5QSnrSB{QgM|uc1NJa48`MAEV8zI~{u0mYN386T!8#R~{C^NK!j4;2 z*e&z2MQ7Ng8-1-~R@HN7g9<$Khk?g~cDVaK+{>qr`L^B-4lOE-{t;SS>?2==Tens! zSle@axl~5|c(daj7UV9CfgYV6)gehp7_d4^6m3^dQJf6r@)`u<&LvjlB*t^^|BNal zlFZNE!4J24e*sAJ=-LcuGw*#~<|B+^&1^BrL{)iAmV5G{W1LhnR~$X4G*uD+%BknK z-le#1m+#Fi=(z}h{(grZkqrV974N&b!;3x0@=)s_dH3DgxL~@$l1MlK$4hAs!PB!~ zPig^s?~vW$J<5am1u_;<`}yu)d7n`Ni|q}_G;)oaMT^x^uW&>m9#Ft!_RZ(3!!)s% zdO<1nG7BrGVvjCgq8)ef{vN3MidCo1jXA2;jPF^9%lf zgeTM`<)2rXIlId?n=tp1w#{wy_*F4*I~*;=Le77C@=X={F5S_u@13Lk!k}#%``*Vu zv&PC-C0<~x3 z{mcoMZGXQw=-P~4A|~To-{fzsf`1KT^I_7#?i8*@Dt~`l=?JGGr0c+b#un_Jca~;* zoA8lL)nj4d(z45?L+HQS5#f1z*a()hvj@f92-0WP8I(V7Q`St`fmilAhp~Rw@DZt% z6^BaCSS1QYx%8Dl<&11nGx`l(#|57T-@8Vl^)Bh?Q0)zI)g7GO*uMuzofK}V+(hV z{74`TDv3YrBm{L& z1&IbzZzneWb?=x_5JI}0xwRP|{Na(Hgw|oA+HJl5;ajz-Y`XJ>b0E$d4*(Wte}5{M zN#Xh%(->a~_;%KZ%b@Z<22P~r+fcvm3dMi*mSI%1!p2>xAlzPe(k{5--3x(QnG_4= zxof2tb<*(&07$MCR&-Ww&1-ATRriDW$u#!0J9Db*MizVbnjO8!NF@?N7L=PfJM^QA zoAZa4PFm?=Wwm;vJn=55N!WMowf*aM%DBpW;dG7pezV{EsPyaUYDmOOt7oXzP>0qX zwmtnW-?CG#`YnVLHv89C_J-Gfd>Js^7R zarxM~l{zf7E-(A_%Y7@0TSA8w%C{0C7JSyOHtZh*ypE>X;E_l#Q~rM7mTO z=wu0g3-vu)gGGfTeZdSfq!jJrx)l{E{lS-71ovsiWa4A2X`-e?E%mgie1o2Zk 
ztn@`yXRm`0$+7*kaQc_bq5;l^g6xmh72eAVRPv|YSgpm{ez@P?1v00-vh-3aI7+b- zP>pZFZ4qTgQ`|gB3?+OCxq$_?8~Tx={;%%&$a4pl7F0WF-B<7*%S6udhp+tl6u}@e zp7iGX!$>hJJsrx8?=HrB(mVKyeM0^+;+y5a2V?Q%d1VJ>7WChKcpw=y$s!F%t6m{$ z05ZOB^r=W632Z}uD2Aa6Qztvwd4Bz-$g&bJK?6K8y+yw?%>d57S;%TPCGPB;NCq_;&d+m6tcQy)$r(l=bRDwz$GD!JXt4Kt zK+)S^HgfbL2Bqi!I0cQJJ7$nqNY9SEIcZLdrFyL4q5E>U9Q1IKamg|Wp160XdsmD5 zuM>ytQ%cJjtg5Bve9frDF9KI-J_h(+L?0V`f}M7-x76WBwYBUA7ykxK6+CNWA}_B} z@`!~!ev8i*$Ysg5MD7~4`-t>Q-q+h6*pUR#rIPHD52t7vxer6mvsvo5PXbD;tiQgH zKs9BfS?=#x2*qe%z6gy?@q`Z?Ul?s7$)d~_~5DQPu0p-MLjY7_m1GeSSAk%?{<9-X}?4?XMfCS5YV(t^i8|UMSCtV zCGxhQ$b(l(;nVdX{!@OA8SNFri7M}h>n+R^upN<9s%z)Fg4NIpF)p~h{pGRyo*31^<89103t;|vZT8iL zah9i~^PPh?mUU~8qf`3UH{=GQhDT<4sW#&K!<&*rj1@Akn~GMa)bS>4;>>S zpo_2<+I>ea-^Bx_EnKTB7lAa-pU}5*tt?hdR*wPamnY3pINQjd?wW6MW*0D0(?sq@ z!gVorkyo>aPAU=>b!?_siY~^f8RcKuJ`L;$ioYawp{#?O=sBZuFPF)|EsF^{7R0CK z|AZ1`BP;KcNx!vS3pB=%km{2^BEPWmpWaG|7c7v)?B@1VdP1P0IBmuMykoh!ua6sD zhv5JD)>}t4nSDC-xcsoYQ$s5xOpfJgt?U|ZfEaYk-zq$)9H#i=6L|?HQqq5rs0K|+ z{ubXRa#dlI03ZF|B?Eh@@q(Y%PrZRyh$)!RkS$1#%dIW$YBMw%q`F*u?w0%w?HW?E`(uS;_z4E`K8PiviA4AKiw5)Gqz=IP# zoXot+EP zt4(2}-UL5;F0=UrY)vJFE%=odNO(B8tE^J4yOH`k)HHth`N|&`MWr@r!oq-h3&eWu zV#3d2c@_21+V`-{B6ux^710&mF4FuwD9vWp`ye#`IN0_}fqq0O+#H08jp<)6Mah|Z zcizZlpDgbrw8Z~Zd)qT*>9jd7ea!$h8QZ(n`qqlY3PMstcHJHc48?omaEH`f+$|XA z040^kJ@T$;zMp(#m_YKJV2aRu=UxA`E{2!7Y+etvx3Rw&$`{SWho^7w2}Ht9(lE%G z%$8L?HWY|3Wo6sj>odL%`9>^a{xw3X{wsBKY{LBcm!9w4+7~?&-a<=i;9ewhS#U}f z51ebZI*h4LnK9768`5~u9ax%ObDmE^(8OcfPCW=l9KQQ=pEztI;Zet zU}sPLQ9iPq=5>C-VRlp<_i7ijyQe?`aaX1Gr;TIKUE2*%)Kygu9YBA=!K#V|Uggd0 zE{WVv?obaZTiNu#?7JT0Sb824?YEQpd>RH`x!wL{Hx_Xad8eK|gy_Neg_)Dq%rfj$ zRW)SXMh+o{f7DH?{`e_0n&#OoUmkpA@SlB6dqH&N8#3&m`l~Cz z1zvpgJoPjQgvxt7_#(-l56<2y-uTBKJ%~MD-TUx=!z70(b~R~in1Kf0*=BF`;O)34 zv^D_jW4~mVQCno$w2_L8dxH_%oO%C;GZiSuhH`f>*5QTY>fd_6vr^e~_OI7$t2dcR zkA4<5Fr(#;>SsbZblsjkP9u5AV1ZgH_;4(apWia2Au{6aPc{`)D13}ltjMb<9x%ES 
zOq%(T_*YD)C!+BLN-QWpG%;kQ4x$a?&lL&xjfLDaACZhO7t;|yP^W-5{PyQC3*m*UpCDh<9CRm006NJ-PQiApr!+CEH<@ui~`rO_4zVchYs~6yP&@2wFb`*Fq zgPWy|9Cx&Hi}DwCPrN$SgiHRiQ<`kaWg=z5HsHkVX%g1#Bc_-5KzlMKXGqw;Zj4ka zEYGL^cTq3-d|G?fxW=hPB2n;vTMaqhc%!a3yVTS)MyAOo$44P81&^&Uri~Jen%VYeb_5i>BlMyrec|#kY(G~rQ zG=D=gKI6Ha{oj4Jrca#d2vb>7KAUF)$QQMD&P$3*vE_jFrGJ0EcA$&1hr(t0>%fs( za%ersA7@IBt&i1>>-O%ytAA50&p$W>wmANx=G9stS53(%*PWjo$oIGdft~e!g_yr| z?w~17PHKIn)KMCW7Y-+nV?=*TF5kDGJq`jPL}(5_-r0M18_S-kuRp)I+d|X~;HoXO z_+&d@aZ>_}i*ajtTP1UM3FXTKzqO~1QYsRD0Z9KfFnDFLP^+7+gY*q!`KPVh64}Tj z_lhi(OA$Gw93QZ*65#C?9wapY3%%CbU8PXdu3yn`R#myhm+6E zKYYEX6l-LCOzT!@c}-i(jr<7>7>ubL(E3s?pIUuH!+wzo;6?iKAu~fx&*}P%PjYGPga3Zn0wtv90IYhoz=25Wm%b~yuzXwH^JvT7( z0=(_P*ADgmfK~XntSrrg=E253S8N%LnE}^eufB1xd{k5V)u!NauW6m~*kltGji=q}A4{k0C#A`syST~`Y<8s;qkVI_=R8=fCXVP) z<2n?AGi1i`3>FYt#>`M9tQta1*ygd6{%9T zJVFD68iZo7gN(jzva_7N0RdtlVlF(Dn2U4Ie`OQxp1JB{qRrl^heeLEpNe{XB4~af zeyckN;++_Q&2n^3=+g0LM*{#+RMBIvr+vGGfQ-N4I<*Y6v%g{lMZpb@-w_j?dos4d zv}wuy52AaR;!wo+1;D^hNQixE;?pj!mBAZ*+}u;!wITLgOmj{R9mu?*79m8$7cBsl z^Q)nWN2)+{Y%hpEMi6JD`Os-#x>(o#sVoGYi}V;39Z)L*RFUQC`r5+O741I9HUB|C zQ!iFY&7qYSuOg*T6l`rR3`7v0U?oE;LHBdlf|$* z?P@Udldpb@V$Fcgd%I!vT&NP~P?4kIt-*@>x4CkW#=qY;yKy6wK~wog_O$2rW&a~| z9qL)m3bu<57;#x)D4f6K=C@R$%(O4KIqp0NA)+M6ATn_+8jwr*5KV;tZxaiA3dREk z@z+lOH0V+P;o8Z)I5KT-jsTUMhmtP}*CqK>5^k6y$scjDuYr2qj^>>-QkplV9Hx&!)R z;DlKmSQ$&q3b+ykxRTTVfzofiv*^KBA_Qf=v*PND8H4E!y|*MdWBeQY!tBFh@9U*K iL9`4fX5(QvO4h>7h?_LJGx!c2Apcfbx>VB0@Bad9D2Ryw literal 14679 zcmZX5by!th@b0ENMM(*%14wrtLK+DPl?EwAy7N#H(%oH(ba!`3NOyO4-OcyA_qq3Z z?mw{i*(+z>nYGrecY>7POJkvvpaTGaB`YJT3IOonBODC%`JbA&;x+(K0nC@W=5$1 zvLAvly)6Tj1*i90&6e4W;tpe#%Cn#QWg*M~s@P*Zxm>czLM@`Ia$2r3b2KB-Lg`mO z3-% z?iPSbMN_N5*+w*g3Bwhi2r%RT{-3gh`M&T_RH|KxDCN?m=O@Ycwdl_s$RuK2Ert+L zBRyA=4u&)8VOkOY2>-?}g7S^*$1*9ZDRDT;=NH6%Mf4eT=<`d4>gA+W)&bdb9W`l1 zhnH`tUq*&n<0pXag>uZou_J(mmdh7|r}=2LQJ+is{1id&hcqca4>C{&|6B#V?$t0H 
zuDFb~>}!AawFK}n@K#v?`MG)A*ykQ`p9lYc|3nH^2U1r0LxozDwi}!FcHsq(6!G@N3xW@b3BE!vKPPS3qE262I*QR-Q|pa=)|E%V6B1;(c?NoXRY{jJ$tb?| zU%_~|QB-f62Io_UY=@?x|G@dS0iTWU<%$~v;1?lM+S-muX>HMO=_~Jf_B=ZeeVF%n zi+4xdreE}bFr0v;@cf6)$XuMt44q9H82Iy!FHfZnHf4rIQ{KxV4mW)k1X{mye?+Pu zZvk;1$;smxjdE6GV`M05UEu33+&~Y531GZ+lhNa`yLR>NMb1vNpjYnuol`R(?~M#} zChNI>DuZC(!OP2YUiIgF6wpF{5%ePFITPI$hm*K;QH!Gsqkk89H-Z z!TjRxTzSetcpwMj)(_sQ*+(78(~UR2AZ2IJd5!PDm3>yCJFFyu@|&jeGwACLZZbue z6zO$bpqi(n)AyD0HtJBtakewnw@qp-=O#6AwAl;tdCNLCl6?!8qN(`LiJa|Aln;H zQ3QZEDv&YwV~Uru3pUL+Q!GZGSa@D77|Ow49t-WC9LcJx zc;3H%6`%8l1)OMbn9qJ8dhQD%1`w=0z@egyWeWXb;@e-5UuO$ zG~)qtra?zwI%Z*0hns*gHtRmr#$}{aer+CI_8SBQ@7ezE)>VrTd1yf`QV9>Af9`T+ zUvZVtY=HtTE`IT1_T9qR3GoHVJN3%v(rGTOTrWb(GMx_KS9AWAXMI46iF+l(@PX}L z5m&YfsJ;cn1&#D7i4`-T z_=qkrfC9#zVSJ$qE`pAyNK^zEPg744FLJABvH6}%>hWNcaadgMf?X>01VCkAS>?IF zI6nW?-|{<%)htU#_2oFS?d*)H0Q==5B`Kk*<%gT+85Sy(vtH(d8opg=o+~Z%F(HCj z@rhO)|C|$4aQN|opZ@M80vaytRO9a1?1lO2-MjpFrzm-_?cqfj#VU}a)Q-pV;NCYh zIzH4A_H)_&mdH-U13g`p?T%SO_b6t^09xEuy60^iw)uYD1jmPi6e*cnL!;Firyb@a zjJ|EO*oA?4G=)HPj|4%tCK6E(zs@UzCN>c&=?~?tX8uwkP5PCmX zO>i74nK&eaZ!XL&F;zeq1-f~fGs(^5jj75B!h_~MO zvpHUv!hiQhqRdE5)UdnCr0Bh2a1ay)_k1t}|`2le?Kbon`a6i!P zMDlMC8eY-7HdN$7$^?a2PoO^`M)*0|3=Ld|Bpn!_Uul-}sGgUxSpO%W)QdMBz+=@$ zmJ1AS_o6XpHz8~A#5V!J^*Q0AtJnX(bkXcXn51a5pwSqmgb}Mdo+elS#NXDx)It}0 zVl;&_$#Z&SrV=?GUUjKS=aUk|MTf1<)V^Yh&(!C(-88zvSJQ-2AW4&+tE7tRTXoNA zu`&7Py=m^kSM3O(4N_W!D+>EZNdOpc?(j5TlU2VID>>3G+p?$ZA9X?~;Ww#Gxn6&e zSaSY)s)G?3Cwmturq84-4yO!J-*C7_bdO#<%nLAiO{_H^h8AfbkQ2YGsTPLQy7$7UalH` z1$M%j=oEhpyaT5Jb;iH>gvRrys~Da9R_s>4Db}>@1v>0Bf`;JF<%m}7jI}&=d_dIC zUl1lSyB9}TtNshnm%g~x0{54eI`Z$(>Tr!@h)bFgfN=#jTk-@p1Z4t?aT$10BUE5o zB?N*{BnAP{gNFp-JxxaP*=3;Sy}eQmzK)doXfpg|#Fi;oM)UqdderVUGoQE^ccMp* z^wMy<(5c{R@e~+Vp5*tA#{^o6=E``Dyr>W4Zhr`R_qlw8R|O_xKdLW}s3)ta6foxN z$Wo}qoIe#kmNF}$)W4C%#nQab5rqiWMWNDjs`R{wsY8KFR(#hIP z^wed3=|@SpEMNrohpWEfT{N}DywdC?j*0~Rwc-V(-*~k-)(!57V7J!%WncJ*paKjP zHZDY9CxNpQ-$qw3CmICFglM9G?GzMr`=&@`pf^HB_?YSqd^M{m5B6RW7e~|6<3A0B 
z38M>i5?^H?M$(Tw0{(JYo}M&pwEH*qy_5R#1=cu66q`t-R;!HFmBfqQflSG>bvwrp zbQ!BxD!%0GmD1C-gwUNGkr5uDk}s|a5Nvd+Ie0zO55p>qjLo>$534 zykFVey+A1LI+tPkVQvTS?~O*7_YJYH@aX z>6k?<&=&=GMgJO`;7JbBqr3HLcD>mCHR*)32)NOWm1KPvk8-nY_imcsdiMhz$3H&y z&a$Hz{f0F=yUF5YYHQ-4gQ89VG7&+8>TOO}d_~{!3uzRwT)2jGDyv?V*hPOmrM<;K zkDC<5KkF(UA4$_4A029zs|%VY!?*I`omO~qa$G|vl@%=UkgX&N&_hwb1nAwQ1IX$E z)^Nq1hffvXNccx%^aAt*f75L~iifya8lD6w1L7PmIA$0bRM8GtvAou4;X0?aWAliCg;LB<`Sp0LK}iYfJ}u;_u7=;21cW|py{_s ztjCf*Qd|Prn3af`Km>rh%o*Hw%uum%ffdd2vl~JkDkgcer%;E%Gk*py+4 zX?6Fmfd)Q}{#w19l+;|A*In5Z;Ly)iJP8R+@3b(A|0G28mZ^3>ygBZT?)CD10X%mM zY541t6JFW)V6&;NX_tWtN<7v-1;E6M6x=gI&n>lK)+ot%))gXBc%Fy9zlk!u^dfWn zY<}(2Pi3IpKjj9!r8V1W-g!C7jtp`WnbWJaC1Lo@5*!<8c`OU?f^LY%-Iy6g&eKmu z2AAlyggm%h)Ux%2@SD8TGjK_~c`~N>x}w}S^n*KikyRW5&|o6z&uP%)zkgh_BV%%j zpaJ)24>fC3Bja~3(4o_rgB_F&Yc>=_oN)-WIFqrpMpVE1`LE<5z>3UgI@?!}^I5;v zKbS(-5)0fo))_v1nY1?rZ+GUIEJJd|WN?=wBiiJ48$Lg2#u}NTiOlAV1~k^8l{~&~ zr3w37dVQy-n_O-@9&5}~dH8M^UZ^4l9_mAIbRqS;^SdK-JcRqV_cC=h-*`3F ziSJ9>`ZpZU1J>Lpu`e`=VhM15I{RMwOMVoKA?$17Yd(bQm%W-Y8CDTk-yX}?J}_`Z zy`pamumj+(9zUjps!)~3%uP0XZQY$eiq#7O^LEkK7iXhVqpP51@)kv7cI(p2$IJQb z+r$7bJ_jFp4^$+|x9L{iq^1GW7HJy$@{P!?kuO?*ZwPmWAW|Y+YaEEpf}m+9Fn01h zln2hoNk#l=61!_^-0wve-I0D}zt`qHTN@FT==0)B^9;j1tV7Fl{;vjWef04S`L+ae zgO$0Z^LEelCk|Itg~5{-AJt*(Yu)MQ&6mZ=%5}ckyV7AEm1u5-37_#?Rv0r{ou3fy zO8My5HS8!9NJyDxhhj@9AFWF1fq0N_)Vh(dW*y08em^MZ zFY?v3^PO}=!vfjICuMbPSFbG0kMQWwYl<o$@!Jj^Csc>X&%)&=yw2dqA$p z30CC;YgZquJD=_1X4MArfEBDecTnf((2)x3^#6*w*T??%+qw!yQgZv{T$$KtZv^1( zfoHg8hMq-Miy5OkhLWbG(Qu@&+iy&^va&?dI_xo7QS~{=2Y}{O=UrB6a8}~ZADk%2 zzV>x&DeBvF0X&Ueg$zShZYDqPPdojf7IN2kxt!}@;N}+L{TJ2B-mIpEd-gR6)g}42 z39P+T?beMxx^SAr5S8u(h_lpsD+BPD{hMs5mfs@YxnY4mO;7N#TabUaqs^_ey|AV0 z=SZImF$;aldl`JgHNIaj24!06x?J)m(V%NB_cQj2ypb51W24M(f@quo>`ReUeTvEY^_*}v2h+nkjm3Fy~nNc>0mTlDOX}*=Q zZS!YHK{wJyPJlz~Jet!Y3w7*C+^ps-dXJTX(QXQ_n_H($!a)MG9uNz$>7(^pDTGSR zPaofZWgeQI>cu0+Bb_o_`%Ek{wc*z^b2;*qU^;x+ObPd<3s$zqPv92I(T4Ou1Y}A%DD%EiZy^34$z@3|~$e)iL 
z?8FR_vq?sw4n4`?T+^Zu;mRBLU($~8zv12hua&iy&b^B^Es`-v+At|w{jM}Mwq);joekn%Ks9Djl+O2tNEZ#PEYO0A0cv?PNo?hIyH(PmX2i@SvimkjZJ$Tn8?HrG=()+ zH}60_KDbvtjs?*j^{apM%>_){M}&k_e+0KW!auy96&L^yqUq=lv&tns+HAGZtal*@Q zQrAfxsl%M`(vHP)v*Yt>`^&B_y~j5!v6l9(?eYR80Eq!8X7f_3rMV2h(1z1?b?Bo6>fkW?R28&+ zLvmOgyZlH&ZgeHOs7(ZDy_!tl6-xbB$WSO?Vr6H(eU|`tExMhY6ojqm{B`cvi+LoT zO8et8;ZYPpf`}Asy#Vv%TSQ9LhV0$x86LtWawJMNv85mRxOib&a7oG5^Bu-(648UC zTZn>$_Wb@EiMve=?y(SVq(>03qNS6S)pgERm$gcDIISbHt#PNa@~Z5r8h=dqr{rne0SXF|HkO0;NZn>>Ph zut7V>cuylkpA3syK76d=T}$suNPK_iX8zqSc3j#iG1JXU?W4L4TqPhVLk*14ud8S*WJLecf9v{(A|54myP8 zr1VIla5^IXBg-3otIuOJQ-7^5A2I6C3-Xsxl-0wgc=c^~y2IiPwv&^ON7w!g%xIBm zpLJkDdBWV)Z0D%25E*5T4`|3F*1)piiZ+)crGAtV z0K`IYj85dYt3Ugm9`x9f+C?$PrEPlQpr|xo*{G0=y1})B9LL1ehHE#he1H@Q8rM1g z;js&cB4Pd(DTMB`gidE?f7(H;c4{j3cG;1^6K1u*%=ULbHWw1QT%M`*xDTVhH!s)7 z06@0gN)Kw0vQNlCXYuM7bIBpdSDIw!fdM$>^C7)Fih1L}c-xPfiaKN;B^L4B4Oo`#vyo>n8MY_y)_B&x^;LFN1 z!_+#32TGoE6|FFts|AyfLCn4;yWrZ_*l>x36;6T0Py*l+nZcJ3?A-K%L${vpGV01V zpPSIx{i0ji@s8$Mh4GM}X*zx+sC(?F(?f@axZfA$ z>UqZ3q@%wOMH7@?7u{1ty}$qT6tgCNwyZk*pbD(p!L?S+{iBMiR;HMYbvF5RAHULb zi8Dh50aEUAf!;2+EP|kHT+y^VWolLeNS0Gx(wwLnw;*G{X(=O_C@bi2{|?9lT^@76B-2-aFrVHjj`2o4@k=2__ClDW%Xj`j!RN?!pdc&izV#_LK|I^f>I9+3Qc!4GuV~%CrV2;A=NIAO0 z-j50~8RM2VwquK&efabE0a&M9QEKtkD0F1HE<+GALxW`{+|jBhw&Y!M)p)1?UyPf$ zZ?rLnmPfD=GuwZT7+jzu>4XnlSXI;`LJKpUvVh(J&7&XLp(e^kDDbJe2E;J?=i6$bh1i;Ppz1LIV9w)A`as!oL>;BtC91bHZsay3&_m1zuuK>57zB-}| zfUWJPar4fuikooJ`1AnWmIWw ztuN@2pqqfQRvDTr0yEro!!@orB>yV%_it+wu@XtR2>xZc>*3dOa*jw*@wx8^IIe(h~Ed5I#`j@Q*$d>yLLs9pt64L zHpFezRzY%_K8|^Q2K1hC^53|8o-0O?Un3E~(qv&@KUuxANJ$~Y&9DuPB1kP-f&g+^ z_4p%ibBS4SU*;~F10FGh#8Agt3dB4-iw7w%C^uK#P9)EHZtcSq|LeR#8a2#IAJ2!r zAzvQW>LWlXR;!P*3O?q@q(dbK;p|@OB zS+%fix&ImusvIo6g#LK~gJZ7_Sn6X*?6*66s!0q67_ zNecTt)pXt$%)L90&ZZ%_rF_7j!cWy<+K_U6a(ayYG{6{Meg&#h_D}y7rZp=zy<+RU z%kNMhdTID0%jitbg7KgLavYR21beQ2st9K5xL!%hJ8W;f*lXMBO-qN10 z_7gLYE!*3kMQKyexFseUQdHEo>Gf*?-8_+xRQu(`PDd>vFRvE^+`lCNyjqEw$ryKf zj6cu1c(6`jpw#ym*|loNJz2d#)+ABt=EWWhtHvc*$OEXC>zyTLYH 
z%+DrsXSYXY*U)9SkiWMRIkgOZdD!T%uMCfsK2k(}MgA$s5z+vRiK93!!4s+VVx&%p z%1+8q&TzjIacv@p&mqYN9=2a(HHPY2S`_GyI^+B#EOv20p;Np>R1}c^;nN4Ygeb2R!G!j=501)Fpa{+Q+C`x6o9G7)V^t7Kf8oD6rOMhBOnPE6x8Z+kk z*E+sVOfYPSqKgi8!$$lolPE6R73t_$sbQ?TngAC{5T1-`#cu z?($Jx9T)znk<(x-r#_L>_c*bZuCTOR)eq{hup}KNi|;r|A@wC^_V99?S>%t2qZ`Y; zKsqgvQC0|Ydf1;MK)W^O%Qz^+i%o{9uvUV z;np|XEe==ovaS|8zo?4`yH;ssax>nm5Jf!3v1g85N+;?tFGop{jzEAtNLv|Vz zi=5Z+lzvd{ zjMR!W&xa4k$BICxa73JS7BX>IAT-M;jk1qX+_JE+wuznxOk4Ddcoj@cm`e}t(7#%W zyT5RIaW|sz!1j)Jv_TngucRuyKUPZTzd%6C%Kt_}8M=z1g~& z-|29koN(vjPOt9Qe%^e=(%s{vzyk!BtEQs;wascJU?SbkQkz3osA-K;I?^^EYap0E zC|~h;#X6(tK-{5>iEObFjc3mKJg?=-;wS8)NM$q(W0%*y!OL-HD*b7W_2JK+ouc)? zCa&*WfMr?gEJh?%-}xJ~Qp`#Hv#pnlQa(UdSR(&kj+hP0zstuo{eT&5$;JBH&?3_7 z*B>hjq-~a~ruLU^!v1Q#v92q1cPZ5Rnidnx`kqGTels>!M&1_}0%;XIS_Y$1H`MN@ zWn8R#OX9~ zmi06-@9*t}-ysNP-Q4sJNQl9mp81|wV902aTxtx~W0RWzwznC|G@|C;bq%fa;kbtP z7oN(iOjH4QJfwjrL<<&S0%f>uV2~3(8%3+>_e}juBqZlshW-0u`2JM$#0FX?>t>!TSBxqUa z7=@XM4-TnHrqQgCkHvN}0cNy|g*zt`K*wSscw=DY;t};$;6Zms9JjQ}gahrk4xNhp zMY0)kUBv8*wB%bZU=vPaRpF=6S+(y*L-?FlDlYtI#VuAB8@D{9yr=DmM$u`k$^zQL zsWGXs5**u}4f>*7geWAqoxbTMnKMMIsmv~BD5AcVgXPkka$Sjm-A?9EZh#0xSk0;N zlP`c%M^+bP@iGAG@l`M0^f&q;}b>Qf%!wdn28%T)Uf}E&K0sWEJAy*-gem0UlG0 zDD82CzsRXBQid=tD8jT>>ZJxfEa{e9n#3745Uofq3oj4Ve`yxzAK17qWUlR-gdqkW zmFwt`07UjLGt=@A72FCoVE)=U4utk+w zKM>y!X~xHIUXgr zUIBOyHN&Ki`5*l!A|X~3>@z|5*q>-dh3dZ;D3nnwm1cEeey`BBwG8>EB+qk_7-bX0 zhy;Mc?Iodn7iwkl57KisTb)Zg7k(69x22a=d2;h3;CoTxXs#pR;Wo>(~8vkhc;6`F01*8v^cxe|OC0kB^h!jCdv!q9p zzm1F!`tlae1;+2K=d*)xsj&sdN*B5Bz^IXOJC#c&{$mN>V?zIx&kf{U5zmZM3ytnWFSlgc{%hFztvx8Qq#!2O-I-;>JTf?<9lSQrHyL-{41feQjU^oO zN!B?iQ6l(Mm?Fv3_JeV@MDL1U3?;Bg_m1PzC<8bTG@E|ys(e2e_M;$Gi^;Jm+2wO8 zF-+rwt`4jGMn1-Sc)g5@9t`4bPaE42#1m7*TgzL<^#Mm|^YzB)y)d5sJuno~RG!JU zb7EUI_2VCQ(d$_g)TvUhBX8tLdJ9I~JEi+D#1qC-Y%1n!5?r{6iL|(bWa+P0Qv*xh zKNg^F^FM7boQWgtj`p``+l*Ro_G)If2xqt4yjp&_c*alZ5`CDjinT9j(c@)*Dx_>MuK;${*Z5=K}+F*cmAQzU)P#k;eSnRrSH0rGvJH z3XhYOeE+mStMx8T*3hK}`T@ATsz&FhkXrMro>6U> 
z&9IR~3)o1=ol9UxrY8WNC^dOe{ZRpVC?PFe&s*LuOOLg|qD4r9-B#H8j4-1?_opYq zKo-d>fD5~%=Lr4a?)>8Xk>-9hThRyA^Rx)TQH?zI%8bnfK(Cay8gUycC?{KU{I(l4 zJQNW{PTquw8jeDOMH5cLB_2g7$*Y7#;A%A;<>?CBNTTnfRH}F0 zMN+&8YX7Z}1UNPHsJ{S^GbHXRsS>Snp_x+?vhN`TMPVD-8yXooO#5#4eaXXp^s_Pw zE=5szIJbS1A%_%k=++ZN_HhgujN8+E`Do#3K9J@%!Yz#=hVbZp{-Glk_Yf#Ip$Q{@ zJ?W;ZN!{-_nf|&IcOpH-zDXsXr&c(P;g^v>m;T3!9*{~VW5#B*Ssk4prIfCK_MV(l ztT7~nNtvI<>HuqzHPvjlrlUfmB767a-Y9oQOZZuTM`ccJ3|d}hKP())-v+6oa+BT% zZl&5mSz^@Zptn7+yo#A%_l-aDNWdgQ{)@aY`YKB5tCJe}MLa<$;vvZSg+wl})zB|_kfl*oAW<}J z2gLVNxY-V8A)VM>db{AZ+YH4%|2gg+r#Usn*nRiRg+zY`{dy*ew3>>L3FqrPCi8u! z_!hi~D!=k-?(I!^9snl-8|kQs2-NCuDcbptPgjb1pE*pbL4vw-(I`}4BXOz&q<#f* zbUZcYIHy3V4lnAml)d(N2z_Y*y zHv!ec1-CzCZnUN{f6LfEr}IuqRn@=U6eF3 zhHXa`k7d=gbXiqyRyzED1MRl7X(ulp>Kt$)qT}X|-|czd(rxriEC?<=6AaW8|3d z{iE9Sjocw!!gpa$Dg5KVk?BveM-3W_(Fu-F`W9r#ue@_#+q1P7zm0Q%Xy70S2p={I z&$;P}e&P5v#R#YQtZM)DkJZ9i_!Aqs(dX3^ye7;Pu_kXd8y(u2Dfa=Xbtkc;$2*cA zhI;N=gik8WWiPN2=QuoXlf(UKa-gneGu+EEzD=7Sjbw1NO`|8wJW6zBL6#uu4n-l- z4H9Jd2MuMA@I_5kvFGkGQHXHJVEE?aO51mF9|HZQl*jIqmpD8fEYTHXUoU^uUOB;; z9Q`?ZmwNh4Ca=HuMwGQ5o^h4G@rXTY5`DS8m!wssSJTf`^O<5_jq(dUAOJJh@ld_n ziIE0by8tlAGsx;Q$>GO8mqDGY(Du=ZO_W`d9tc682(>1+M5LCCZC-3J+gg1oxah3) z>qfRaK$_`L!H30Jb9({f)o1S~we*pKOpsxjPeCFB9~)oCXev;m);_E3kMh{;&}Ra& z`2`+usT1~m2yC58STS$(IY1-NlrE9|Scr7?v%Rl{Ne)5$9q?lXTp#@WV#cqEN6dfv zOxmuL5D1x)D*JF!k>Mu;Keqn+jCz9vMm38{jy~XyV{bDO(syj5&F6zxLHC}pQrkvR zP+_@ah3-mIOYz9-O$0UfxzkW$y8m2Mz6Eti=d(Gf5H0W;;8CDXe1S>D9Q+oY+e9}y zQUWQD#BGmB^a8>n)DM|P4Xjk(;urNiDa5A_zAG|^`Sv8m)FHL5Eo|(a8G3;;2+l0@ zvW*|7r=|wWIZBVo&?8imG&CK6mIPk!hN?E?xWM?1u(r2 zB6N2iz7IS6`>y55BhK75+rwTHGpxhjCQ5e@CM;A@x1MQZ=R|^hv!eWQe#ZZQ{5k5x z#VM2sf74sk!bJP#(-a21?mah+_v(J;%hjglMGFZVp5Tf|b@8JTvd!<~o)>W`{y2m8 zM`RCSpWfFZ_aA@#suU1>70$D5$)$vra%GAPZO5aCpRu1bwJ;$V3h0_$OTSXZ8=hxO z>uXrW9{LI&@mN>9`U7o@&ncbcHGY$aw&&*!SvX3AgN=ucKMjO?%o;>wz?Ox?-9P6? zLNw8beBCjqUkpH$eNo1K3pae61>mYz&^S&u{Ol%6S%mrImVPunY|3%9d|iAOF`Q&? 
zDsFtwtfYtUW%^Kc6xPoi=B*14G<~shiaGeMY;qt7mG=ONHVWbmF)I06kDlFnM>m4U5YPmi2i|PF6(j>|Ww6~@g zq>3c_+BZ3uyu@LComUe6%!#<1PPBMldXZ(Kw7JuDC9SW6@Y8$iaz9=XA$Z{<-dbQy zUKn=4Z?pOBtYWt!Xs1~OhyJ}1E+L(F5E%qy3RZ-l;&6?RZIScbc}9i2Yf9~0%6}~! zq(uoJlgp~;rh)PZ2*%XDAr?Hq%{7N!8D}^33#R&~;@aFU7@!40SZOk^=+W02UM3iH z%!>nN<7-0~vNHcJAV+=~&%lZ4trT6Sqb0Cv%Jk2pl(SF5-_+QfD&iO_`WN8(2jr*8 z(G+ULygwajWA;M~{2xC1)s!O_4VV{`)mk7a!2WPO5q;gp%9_s|XvpAPDmyXh&NIn< zjD`s2M-Z1IYr9Sjpgi7EDvHH~gGx22>eGI;G2k78-cAbvB-=Pzcl7a9IZ6wrBeckj z7foP@4F9qR=QQzghe+lctLfvoz>5Ui1<^G3v@PpM+^+h?^-=j$R{+Y|mrhZOxS!-> zY=i@dpXB*3CU>{~dpYJrRd1c|z_MGPA^BZxdgzG$kt#-NSfy-_g z-x~e@Quen!Mnqf}ZLnbw!V0Xe87>4B95CPG-CUNZ*WQdnTv_TCkrUpI*xfsB(k7JQ zGgg6TvVYsJnV1C=dAqE1seA$eUx2{$tkNlqhl|{q#r7hyDtqq3$ffH^)bDH7v0-iN z?Xl5nC0nqEQ> zbsL8y)~BVm96KSo5RgKtLi>_frUWESf{myfRvW0S_=|2@uJXWxj5)t{Wf@^gkln7P z;7ZHLVTKI;Ch1?(=m*03uJ-5`%*m}O;+8+X+vL(xGfg%2&o`%oRFAL6P8L7$HGX#H zhlBL{NR$e!v!D7^&P{h+UZNEuS+_Mvfo9TQdOH4vV7Y8?#ywcqF#Q#MLlfjOCHk-v z(6Y?;oUg@Uj1UEzgb&E<*-N%Pdzq9EMP(wkEr(XdZT0C1zpb6#a zyvJKOyf>UFaFov(K7P-wVlAec{r5l`k4iYg29mSI)p|6^Jyw6xPmN+rvhab~2XB?q zxE}tA9=L63J@Z0wvsjKsSfHSk0za>GvZ?^!7LsOr#mf^83C0HQCRaqM{!jC0T7x{( zgM&bzcwhz^Dlv(KK-0#(jgh#I{Z_-9q7+(9&@$Y_};=f zHM$?nnODA1QDy6p)fcaqNpUu#5=pYjseWe#8SGDTDL4$GZUIA~%xey&Z$4Boii+Z~ zVqIKI9gQrVcV9s54qgUaeM!klecqAh3S*9O2;FgcHoMvGLh`Arn%hY0FqVS?C Date: Mon, 16 May 2016 10:36:41 +0200 Subject: [PATCH 28/58] New photo example. 
--- samples/Stones.jpg | Bin 0 -> 349415 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 samples/Stones.jpg diff --git a/samples/Stones.jpg b/samples/Stones.jpg new file mode 100644 index 0000000000000000000000000000000000000000..947077ec2b0a08d7a338cdcd37400cf5b068eb91 GIT binary patch literal 349415 zcmbrE^K&K9)`m~)WMX4t+jb_lZQIU-6WexjV%rnjnj{n3KG8R~?)M+u)m7cqwYyjM z58ZpO^}O%b+SgyeHyKH3NdOoa7~sdh4e+%I5c|*B{}lg8@;@c`&&^+h0906T05|{w z>^lG)6$}Cu?CUpx2mk<3K|X)0vrq;@NXO+?BDhOiT@u303aZtz`>va zFkc%0L0s7f!O}vrz;axqS}&CXf^`CDmJUv)Ba)p+ z$1oF7BpG(n0aX>fYmVe}8a$v_Q;wpTR+>XARhF;p5`eS!cW#qlBv<10yBFvIx6bt_ z1=IhpqFz#!gb_Lw51GksZKiL096T0YH|}|o#DbbAqr<*iO$o2eW{&X{;om(i0rrMcl zR>R-Yd6O8!QFzn^Z@G%jh?n>^G>L zUDNLCm9p3PC=VD;=-OKV(Q4oP5Cx=$dlED zbX0XHShlFAS&WNpiPw2b;9UY7aeiW{Oxh@t5ClV;h2^Nu-4oENkUd!?GzYob9B_XZIb|pZ^hFT0hb75?CWecg??}v3 zY3oau@MUJ41Lv)#Y~VWt~`sH;tdi#fx`&z!xZ}nlr5FK6Mt22p-XR z=C@)JM7C83qf*7wsO6$n_I(_4oeO?Fu46&8h~S9p9BZQn?!ef^f?3%(r!A{TOK+*| z=suFzN!;GxTZ6RZl~>VVc!53q;8GHjZF=U)gY}Pa)nYo?`j9Qnb-9dEsBFTla-So0 zpb}Pa;}0})l~{i+-j3Qj!!_6tHM~oQx@^$ zir|Pu!Y00UXdlqynWtdh7uS;Fs3}nKq9m)L*yy`-v_FhC0#7grDa%}&!mLQ@>*h%A z@xNENtyy~c`(h_Bz$Q~H=NEKVk81dl;?ePPeVqazraPe4Lj{psYsC zyiciKKqFx1TC8d|qPT2XMb&b+&@*zy8~yGE`eh@JKrh*O<0;PlShx}xtNA3OD&sU+ zBc0WUEPrAXRcLGao;Xt+qW$#a!TC{aL?fC7|0G7Fm$SKj2`vIEK{7qauPsjcxl}T+ zx*;v{IF{fKsm~g$j?L@D!^BCJUT@0a(WyPF*>`vr1;G69uA$HB0xzN4v1DIu|!V?HP}PUbKqh6p@Z~X(spa-LJ4I z7^+bhI>Pt|{4OJX5}xhcT|qT23cWKEPL~H z*u*i7>Irh21lN^er0900gt%Hh{g>C{Yb`tB$?0dh^eQh^QR$#zwf(l%8HuHj?JqhV z4x%P3+r-l2WI3KpqYEXS8iy^=_j4& z$6mHk2S0{m!auAQ-eP3O^V%%QGq*|ywYWQtMXAEtM-;|ivdx$tHg#TvrfraGC{0?X zckg~YkYX9dcgEzuJNTe)0+*tr1P^3L!`|9ii;dC*A$SSTX_a-zc_$-H2Al!sAVSy} zh9@_2K+f6&m+)+JVGxT}%oRfuZNiAla9A5^ zao+F--~HQyWQ)Zl#UmRH5;5h*^4e$x0OcLo!Ygq>4V4Jz71h-bZ3*Y{nW1x{?7yH$ zt5!)jWNz{7eZ719)JL+<5o>FmcmJC6RikUSN7|FBLv&)}^5U4Tm+JibexWkn%5^&{ zPVwBpSSP4`HVmPM2wTcqxF^9RP^fi{)|wO>I>ypl!x8|DP#60{i+77%vg;If9Z7f{ 
zSF$swGZr^kQ+cLn8I`(;vlx~_tY6natVfNtv{pgPxuG#@kdwgtB_)c+@X_E@N)H3fYE#wrVpcF%oR{ExKrReplY| zw)6`ikgHTq8QN+n?0qxL$W>BG# z{ih^#-g6gspzwQTPOOxNY?d6a3{fL{(`I6#kAiX}D(h zOLTC&eq7iufT*@1zXlj930rgR0172r4hmK;wAr-U`bvLyTbEW8$!()XNHC=L&LiHCNJBp&MMPwj4 z?bKvjY{6N0;DlcnWQ18!&{DXMI!bGhX09zZ(3F`6W zEu&D@Oz-k+$9M0MnTgZocXZws*Pktq8?(o>Rl)%sI^O1(cC0=$g}keRX|_-Fi!S;~ z0dw+&S$noLoo<-;`s__O#RM_@S99iL8E8~2>W*iW^p*6(IBrJ0B*8}1{pY6I`79@H zHdQ)ElE*QJ+IGio>XG)0as%(J`STysqfe|jOQ_pWmsCxC;HS!ECpk379t)Lq>1++1*!ik0x}EHinn`KZ3%a=0Pm2jo4%RTuEK2|!9G{*CJ33xFr=eR@k*k}OtH zWy%3w#i~JZq9ivs@()o}p%9bHwTb`sg&201~L!q@fRI8C- zcW|BA8g-qnWCGCI9)StR-n%Qs1=guist1YuAlNV6VV&tdY~*R%iEaeL$RIX$7q%Kt zvVWv57RBl}RdDH(Ge27=J8X|S#-_?2;M?NQC4X5wqq63<;*+~frZ#g0Ge?S>7*izG z#kFQr>8^Z`x1d(adPG&`s2N|oo>_@fWj>=!w;AKJ$Ih(9*7a`T7^>)vc9R{kK+BwW zOJZPfeX(0Kd!-nXnce{X9UQ;ZMLLZXg?gVzse|oPwr2^khOQ7ro?1xybpjPD+&sI} zy;vU|W=DtD3;N+J&6#w^Vj6cPq~Ln3u`LliI{fK07;EAW?*4f;fA!p@Z5d%nnO%S4+qTPKV7_Z?9U%3!<%zkZnN%ukd99 zBpn;P0dXk-ruy(CkGA}D@$;|it__MAG_*1AhuVX4VY&RqMfgf=%u*FB2v=kdASDK9 z;uM^wj%;Q(jVsic7+tS$aMR(6ooIov6FGy!4AS_qF|>k>X^D&A5rc9;p_<)Zw2_}m7wInyGgjZItW&=ZF>}hn^ zUJ;T~FO6;IcQnb{y8*4Du%g`~x5+|iIv)qlEE~j%Ot8>Jygr&^ivSx8Y)NSgT8jMM zXdWJ>y3JB~-H7zsBDhu|f!VSRslvX1=0~DLeni~!ov(!9Ri4s5P z(b)#z8~evPg9xCC{Q4Gj)fZdEYB`h?Q(&shb(%mFhzGgAT70fxUYrKIy~yRQ<;YMS zrNq~jA#rzy;0Nl25-&y`;WYJ8;(^jgP9*Eg4jt|!dEdUwSfsK75EfC>aZVY=#}^>v z`%Ok93LuwIv>t+nctR7FJN<}W=LahgK{{xVZ(xF6+csz=T~txlAE5;uvf*cdDh28H zR=vF@-Q-XNw3?p^V6ty_@2YBteS-95Rb8>SOfLk+0J=eoT5(bV9T7NA7S~P1Opn`sBmM z*6M2QAlUNF66qZS7e%wNGMu?mH+3AojFpQig-ev!Zop7`{fIf)Nwa&BxSZqV`<`TF`f;i_ja3=GzD(JX3`*%PY(;W3f4>p z2G}pdXK3}bi$*xN0^?F<(|xoYqa=H@5o=YgmNK?`p1C~rI@(UEp=UBq1qn><>IMpM zq_}_v8wW2t5BMij5E8}(rNKg9dckQ51R)Ot(A;6kTK*=6Tq z zYrEf~yc@6OIk%Y*jt%tL8|B)tj&(2gr56@u^OA;mODA8NTmu4sBE}N=W1;wuT|x-s zjU^+f1wtSHhSWg>;`|5XI1lgaPQ0hB$ng}aAq}~xvl*{S*6Cfab&e{-@4lW_MWu!a zY-x;0-tjK_EU#b@8}?8W&r;6P-Q62r=Knsdd_3t>EVXg})cRHiGEL{ng*3KEfiYEX z)KEJ5>%t(D>Q~)Cb>?9gNQgz55qVDt+%DJg-*89`e~%N4q2K4;#SZ=HcA1WLeqHqj 
zzf5&P@uHoQ_7ZdXX?X(5ur+vKxWMVD<_Xmb!!(y}vajWa#{fs;^pS$R^$H_Y>~3 zMMGFbbYXY>5;;gtz4xQ_I^;aB5H#8;*rvVS^ULq`@s3CscI;`Fz5qPNkZ`Po@Pyqt z{;CvM{hHZ~H^bh%l#2H4@o8om${_4?Pq4K)s0#f7F4ov&4Y8{4-)}a;H6g`l5m~gp zCnI)(x9Qx#!<3w~WS;CJJu%XPBCfGJ+hs+Qw{U${P`2k@Qw8dciw(22Pf5k!Yp;kF0A` zDZ-73;U7@&4CFB!HAq0Irp`!mvV-&?tl7~Ns7D5seZ{dt9Xr2)3m||%!;48ca>{REgwo;R6c>kz&B<(U#VK8$e?cy_dLCi+Vw{ESncBKP)*d^^bxkHjySFz$6_#x}g zM%(|*`Z(n(=acGnfv*llk@UbqwLOcMP?z#`s_6g^bNnl0%tc&2y=Wn35YCFT?`PB= zTg$%ui)k?iMmhZpAZgp7$YBDt?Ets^>Cv)F~2Io)5s9?vV}{#!~UD_roD255dG z1FC^KC~^${Rw>LRXYwQ?oysS(KxQo?7frD#WxN2$B6^wN@A5g%d(!%qH$j05C})h*ux|9!+$2HqK?#M26? zvZ(SlwJTd(VnGuoMz1X@_9hd^%BMCiasMRoG`Vt$2(YjkH-BNSx3joknLy)}%Pq3QkfF2k@_0U(;ZYv=ict8+59HV+3(RmHm0gH*e zuq=uZE<|V9I6kmv`g}LB*8dhcFcdFZWnxlas-_;j0_%pcxFRU>0Ny!HVg?x$l`H&& zT*dA+Ex-}>m}d%Z*ELaK^Q$?>{VxAlcI?j=z?iU<>(_k2FHJFP8*}JoH-Zo%&9eFl z%7<@mORJbccgjPw(uG}HY6&>msJ5)|)1jkdtt%cCKA60(ChmZ@W zSn66vHCsPaOgVzx3YaH=63>5N*R|mbkm9RK7z$o^klv75Qi)O|T3m%U2>;Fw5Uv|F zAdLG6%r|GC!`A5hD?XT>8N^W~Zp9}|8IlOxr*c{Lxo6AIbY-%|9{x0q+xeBTC3#h}n?V$Xu(!;EY@-<3eNM(nNQKcv zwGO4%bTbw!&nNwpsDg6@e57*?f_xGlUOY)+OR>=aXDXY!&wB;fgF#ElXP{UE;&8{GetM~OR!#o~ZqdI9@ggL=ZO!AC~N zSNPhXXaX{M^;$mm&kVg!KdZg8o_xm-i7gk5-l!zh*7cM9d5b+t8^)K8w)6YaXn4IG zr*u<#&~x5Yxv>icUG9;{XVAzE|&b%HA*Bw(v5=r2Iy6oriW?6!i^6DVQ&D zj<@3(q0Z9j^J619K_)jhD&aXi5*yd5QtL-tZ?%4wObT0ip>;mvTpOg#a)2${+O#t) z4J&7idJLoJO#ON)NtbI^0x4)Xs7=fM)b9%rm-E_rcZw{HejMPq&%slRc~0@K^MjCJ z60+pngX>Zs=X}G`)Nnqtl=J!fte)`6ZfJmM(fB8aE!mpr32mHw>oGEKz0@2AucTbp zt-*&^ABz9=SoC2Yex^GLP{VcX+FQI_;~!_Pku=~|!go*wm^H9m20qs<_%V+;o)vW0 z^rbXqbuY24;hXctbK@{VRTRivDb>*XfP@V+rIM%G`1XwMR=sOY)}j{AzU$ITeM>0n z3VZqiA@)l6IxL82^Pw)8L^l+TpH;{F(ul)gU4}}&DaZG4J5}Hp{odbk!%O1#{Rt;h zzTVK)sfs;Gk#>`lk0Qqnrza9Pcb8^aj;Vx0IH>vnhgSV3IE(em%h}L~2l3?9Bg%t4 ziMZb~E6Eq&ftV$mNz5eTGH2q?E;}7~broAR{DtpW`7+y8{2;dApHN-RI}KSxz)A>Z zNxJ_WlI@*!M~6p{dA*kQLHJ(8b!Gbm`7Ue*Gb0MwEl`Ak zF332Lv0<7F;#JBanHJxWDGqqg~*XKvGKIDv=x@Yb}@n4_(xlb36oUZGy z7AV@kE8(U#kQ43CH&LZrGA9A@CrB1izr52C@V7}7BMNT&+uX>7v!m{2T3)7=y?WU% 
zkgPGYc`=%&Y?1z#acYoeDC&6St1t4z#S`rWbK)Izi8NjwBXU|8R}3;VDg7A<>C?RJ#7 zGirD}52=C-L@$Rwoaj^acAjD?UrO89wC9gk{kU=VW{ppzE>63YKILm144U}-f z9!7Q~QZ$;r>z;N!I@jTDlXhJwvzNEs%AZS*@}4@XArwA5uF<{B#I{^Vc#0LWyk3BRX3waN{Q8?c z1g)KLJbyoRJfe(v8O-6syt^eou)&Zc^}-WqkC~&pY?rfy>}krGwHWQ~ zsTz4W=5Fr?$z;7;@J&zkd$}xmgJh#e_XUsn7OIY8Yzn0!JN5n(bA}JvyVaabMcbTehIyj<#AfxDOK+N*^*O~gC(yo{fbwi3to%2Kz#7~1N7A|pRHJsY?LhAt!KDt}h~GI z%eW)2IF!n;gCj^`O3!_!G!zV6CCXLnN9wI1EKP6@k%>Ozcrod2kl<9?w#KNHU;Vx( zz%_&WA&eP6(G964KT=y3@X~;vNgiPUIy%zKq1N9#r&d$5^uVic9&|A9z|jpHzuT#) zJLjlX|Lr%{wm2U%z-ZM(Ptq zLYK{*ZUcF32dM@i(5XmCwX70m#t`v+DZgcX%#*gd;wOfxBpmJf!wE?;Nu&6 z`9|H)!;7^NzU^)vOM9PGry&EM+BlX}ng*n}R6Y|5IzFr$a-TYq>BLF-AI!HlCBb?V z8DFcsR@Hk--*{$4QRSPbZdDmdg?uq4#HL(~nSLcNUW&c$!1hw*x&SC zuR|uLNd?-w{M4fPU$j&M}3ZR z1`|Ist0&lT#&2~d70g-if*U_RGUp5IqnP~mIW|i6-(4g|XCNwTk5MM1|76IWdA>(PnvI8^o=T$1gXsv>v8Tg$CBhn%kN^rfh2ndHxw#SyL)@BU)B>ZtedTTuUukmmQv4dAIEWa!^em5>>4 z;&kTdQ)8fgZ6Jt3DP7#e?Ak;dE$BhZgeZb=B}-O^ABc zUQ0XoDB>;*3NJhp{}A(=yqmAA4;@q5m$&hYswSX+Dxz?TSG7u)rO+;ZSWSg6aFstE zjR&(oiETA_injQM8F}`v;iR-boA0jh1Ic@;OhOB)ihJQ%C<*^s@{@a6=7O(iAWg|e zCN{@B`sqy8=YX~O)4#y('B(uPy{WYA^#4)47`7(b#;*a~Z!ZO!qbi2Gqk z^Mmj=xSL^M5NRW!k=sF__lkpj5)}irDl^K|dyLgzvbo!NI&;B3!|sj4fS+anIv8U< z#zN_QyKj{`DXnU%FV|y4&h#q5CpVAL5tBOfq0LL2Eor{CnJQWU~tB2N~!^+&` zJWe@Rvq8VU)i6<=VWkU>Qp|wab~sS2I=%-gT5WrHPFnte56~|TPfhz26Uc;FwE2PD zebu9<&-Tcfc=y0qBski@+F^{=WY z0!QuPicJuaKDOzu8VL(SI1gOS(J3=0A8+=+(b3RIuMYZpD&(Mh0U*lmV8XfYMP>oaf8zzcZC~gAuA~ zR*{D_UC238RQh>rCB>e@J=vm32X$*FQQD>_DA5k#MV_gNFRiuKwWkBj$UOee8H0`o&LgQhF1tae#gl)V`F%%xLkHEL2tU&Px(t4)3j<$4B zLNfn4P_6?t8Y(i{^Q)?^wD3%)U;>^ZcPVn1XK@XIjd}grA2}cF%}`*$U*H+e#%tua zgW9B#@Zkk{RG8%|rzMK}M^%+5!7k{$txd4updz!Q#(t0CSZ0!?otS=i$^e6M(Kgl0 z$~WKBAA-Pb-Px4p_qyB1g3cwA^Z=SZ)iCr`UWdM1B=xCAR!&Rh^PEKt=1l%8A2n2` z{Z8x1agd~RdWUg~^3Ka>)bN;1lK;4c^~6C36gDx&7~s*d zwoC@-I-X@7(trE<>>v4$sMpENNhq%1)wsIq+#Mu`DwID(8_IHHCTy2%9^&dZGp90u zW@ClF0M%arl#f&oB(Qn-hv7PWVjY>OPhMtRgLG%3SL-gRUXzfLIgoMRTGAJLEIM=}za1u+~;u{Pj?Qc-19XN}s%gmjAI 
zxjuf0;{E}Qn&`6iS`6GSCiAmuOL77?YDfs%_mAWef0ivqW6+B$>ClCzr7Vv!+>=rG zSYo!R4S_IQ(5^2}<75?K)g!9RMtA}r?^XRxWSF9llA9sE0OEDyXcFX%Yal>=YA4ga zLeXe!rw$n$EeiWiRCbi3irqn7Q*vzZefnaC3CUka{kk!c;@L5}h3# zMpp8nPTNwvq0d4iAP#4^Nk3mf40bK;RhG<9rYFU|svhL$KLoj6gUcDiqF~mhfQbNn zz0ghdBuQwC#!u*Zu6SauEIlqi$5h$R`=xo^pZ#hdxQHF)&|7D#R-_*JUG6Gw<`V`~ zrCsFVtSkymFEJk%ELAw3O4mt*DLU$vtwtkSqxJJIL5&m!0|C;Hpb_ujvu_UQOOwki zxu8OJlcq#?{2nbQQy9&?aq}Cmqu8WB92bllhX+|lTnEIm6x2m$hc*{i*fT1!)aMrSP_3e$Lu2C%G#h z5Skg_gWj-#kfJN0f2{Z&^iZX{vYDOH)@fKabm|VFe4eT7hj6L>F>9DTcPMIY& zx;?Z8Gk9eFn`jjbj7gtLe@!VQ8-4+bLAj6cu)XTYeCZsyyT!}xyssp2Dc9 z6aD^hnrzF-vV1}ilszz{6OO5C_a@B3$ zT}~2nNVnVrnq7h(3*|3u)o~VIbHoN1ufz=k%UJ0Z4^-F~cF^5LdyX~0=bcIrtC$PS zw>N|}gig4=+kWJmjQdJ!HxP`xo^FlR-9vbw^+!}J*o!aG@_O`p0? z22fQG<7Xei+w{G}wH_}S!Cwi*@~UiS|7myde^}{W#8`x0!6$HNz2#bqNaFvZdPRIx zOAM)CD~1+a1tuqzm7GbsCt!j5+|7c-DCJ%KOIlJjqSzy93j|Kn+^vtsuWNJ*+~s?v;+ozr{RU`-}H0>2Fe zZI8E8jo+ZIwkH?RMC(n0l;)AM%`$%{^KukFZHJJj(!1DebJ=2@kY!8!k@5 z$TBg{`W5*Z*j*CoeE(}WXX?B3(S5#V`{Fs0E5$%%DHSF|dJ?{4q-~mQJrV7Acpo|m zSY)yo>-zq7aTBSKVfX%tihp3+Y@6p3%rZp|Q@-QkcqUC_{{yW&r8C&6yzB74g(twT zzx8Lio;sPGT;NePwjAV6HD1RX~A zt)mtEzaJ*+!{R}Bw)w61GYBlEth}TYI5!t9ew_jL7hR6?a4#jT+1r8v*0R!Des)$sDlW)vqIEZgMI&#gCNx^<_EMCXA)6jd zJaZJAxZZ{Nqiy{b;`z)VNydJq7&%ZoPAGr!VD0q4nIfpF z!CLf|51BJk!*wv5E*qsOZOwf_YOdwjvZ3Rb&bjQf*Lw?7S*eB8d`R}&bjdHXBI>>( z{WEl?q_%Xp5zRuZj-!*UujI;z$Dv7p`!#i??^!^IKl>Gb0c~IgQJJ|wFLY7*JPvD@ z;+bpnjZDF!0)~qq1?D8WTy!B>Z@E)ks&JnoV=Y4Ghp8bNjt@{vQ4&DQAHgRq zmQ=GB`uYX9o$ZsO@x9NSFTyHb`4B=X7k_(CN23R6u9k;xYFV2m3d#WN(yJF4Uf&-! 
zd675j`|5N<|Jjp;AF#9hzWW%o9v=<9Q{eSg1=8SFnR_i`FXSO}-#;{h^8QiMrMgIs z<->EzeBO*{cLYQE7NnUB?)z=N56dD(qQZLeZnWV!kXLnYZVqZ}%IBp23T5SnnNCiNFU@6IdLNJZPH zDmX!Wkmz~=sc6vm2Re42TyvIMN6fhK!?rGYlhYuu!^HXWl5`l{N ztV99X{F;A=fC}SeN;`$fYIWZhnozsfT6-r?8B|V7r+*b638L6CTWa6vi$bM$+xh37 z4LswsR#rU=gGu?VbA&XkM@*(Gc7!fW6G0~6v&pzfzt}h4KXgJH^71k{!_1lpg;=t; zod3xn+D|V@kC+*EXim9Uz{!Rt=`t*N-GeZtVBeje6xA|X0^1}V@6-bY`MZ0^k$+e0 zRW^ir<(LsaO`mCmJmj#xvdrpZ3cJSf_@Gm6Xin-rp3nCfQKZ@1JUL*MxLv$D1(} z!tV@5*m#5Qk959C@^qx@H3%A%`K~f)j{l^J&t0la8sDABDpbalkQZNH8$T`rW(5DK zX~)JKC#)nXeNCAS8>Jb6d_@kDxJ$a{*fDqdT-g=l?Zeqw@aMK zSgBTp)hSqho_!OOetU{V!Ju2Rck~62&CF==I4y0A|9hD6 zCx|rTLh$-L%b1uTFF!reu^CfcQ3a?|klnd%R{foq&3|s%7MI)25{$09I~yWc5&xhl zgr2_sz`*m7u2wlO?*hpVZ;RFSRZaZftL{5NSx*%@Dp>b-lZ0BDM1J(;X{nN&KZ0O5 zOU*T@CM7M^Meb$^Tb0pWim{3?X)xn;S7}0O-1?-yZgY)w@1a(R5lybdKLb_l0eY8w z3sV?g=Os}_fQ^!P(MRpch!n=|3juQ&q*!Kxs0QnBg)qYiS1btWY4hK_t8!L!c%q|l zOgtyF8Y^JZhcAQC$1b~@D?aefpDbb7JYYyxuZpi2A^8WX3e>`4IU~`Psax=WI-xl% zdgfifm)j9y>{bcd(37ZDA!k(vuSixO>O*(bFa(&eTRZCm3$s6cL#5w4Af$hp4vfnP9K$<8%$+4cwxsoTf26k9i_mZ)_yYXovW1i%&9gZ8d!e7mK3F#$QI4h^ zFRd~1jus*ZhC`9Ye!jwE@|hHIA(l5mG>!sSDxPAyV79d7ofsbQ*EpNcWl$h`U(ESv zAKst2`wM{0*tvgwT)%YnR6GcN{ek@jPzCBzg7OzMy6?5i4K4;<+ z{3$O3Ksih9sHn#^OIK=ngLBC^_K>b8#C|hNY)D8SLfwC|{o-*VZY$6Bx)dN^2r&Wb z{L~#EhnNv_{%6NwcNw#)=fS=(@B{o=&e7a?qaT2TxyrA#@OAYan#|vYO7G(N)G9$J zlq3DopL}y)N@JZ=d8Ag^X{+cwS>yCjI+!?6&evM=B#PFl>V`0bPMhDD`o|IAO};lc!yBIP(h_WnRf zH~*xMZG}@nH-Pt_Y#NK|9Kxk~4}ZC>N-$v4fR8jme=s0ammMI2IdIsSvICyV%ytFs zN_=c)tyjAl@cH;$&4&5Y_bfqX=m<0Tfapk4<_mzj{ppi>50BZPT2Pwbdhy+cALhN~ zp96}xFK<@95(Nz14s`3jVaarI*5?REamId^5B~UDqS*C3Y|R3=6sg@NIEnM!4fA~8ruEcVmS;|N$iK#e5jhC4)DLX`< zBJV*fzHXnt*lF@HA#{vAzyY?Kik&YAh`$_v$d0||e2rG7UW8Z$@esKPz$`yuS%tiv zaW*XO84ax=V@Y%Uo?fedC{z1*DnhwFcwX*+=p}uJc_ZBSqx?5THbOiYqs|V1@crTM zXN53OfG}^XLri2(JZcH~HOgC*h67Z98{QI+nbUVOC1(UZpV?cOo!nC|aRcSHypob7 z)`Cng(^;>wW! 
z*^ahTydh}PRog70W+p`quKe9U65GGwU$}(_J&DC9G>Z|;D{MBPUoW2kv05zoR8zoB zug{N=mtxkjGGEEwbp^)S*?x1Y&|Cx0^cp#xhfveF5DAlahk@;!5aM?NEEK%iFoFA4 zr3}j@^lY`6pL&h>d6{1*PAb6&O&d6jif_N#P(Iw2*W2*65Xf;7xP6>2mBhQ#S57ro zAystDC{AzRpNUpXoQhzfkXW2x7Pn%Ij^6!OP1r{_>DEF5O~cRt?HH>%OvXEr5@d^@Qc0abtZDV&OAvMR>UeJxy&?M^WuxG5T3Rb`vdXoY!07^yu~`%O1xaD4*bjd&7Tth z?N5YbL$CAvp5qY%b{pzZ!MOA~<@yIhJ`=l~h|HE~NLH40oxD%iz8YOm?wyH(`hxwn zQgGoKT*r!R&b!mKx}krMRTONG)N?1LP10n3lws*pc(`jvA1B4Am%%Y)JN~mhK~|Yj zBzl$pC+B45e6d`9Hlt3bOprzc=loSt$Tz73T@=}u_^JLOi;5(#Nd&bon7qNJxtZbMVtQ_EStZ!wgF4XiMt0*&AwVPby6;yvR79;w5Y!9sx4kG2yczk z01QUOU?5*BU&jwFPENd=u_$%kHyveSWxEdg?sv#_?HddlKa&^`MbtqZFEW z)%YV7Y~l)POi{mwZ_9hJe~#p0?=N>QK+B{r;;gWP%&j@Ld?L5Q9VR;@@cxtle_iMh zF~7dYNRy=HHLw#GBongrc7-a*)MMUYxC_R{X$#5D;ym;i-c4f@+z^5d2t3e{u2$Pw z<4k<|2;2#E$raU~&1>=df*P%wV ze$Kwy-(+aJ&ODZM&GeQNpdGLJpP$%}Hs-2`m@tI zQHUY{$N%WXyoh?^4iGx&4hA^Q?{_v)kOc2pNQYXxv$ISU_knQ-ES}nfi1OIRJ2`UcnVUiry5@MAbI%&*m`8%zU4lABd$x(6 zPux3C9{zCQGV6Rg8>P2oZ1MSIb3gP6n218v`I}#sd&7}D^Sv5J;8Q{Wpn~cH5kJ<* z<4K-}j0YQ6QtiLWfMDVEP%#heZR0xgF}?rw+_3MX>phSgO5bfNn|#KKw!Gxe@#1+7 z^ABd7{eFAF*IbbF1u*aco)&Jp_a6c-NbGX!jUdU*maCPg&C;c(j_|I2L0y{E2B8~r zNpEV_qW-3U-7AD$Ev$ZjAd}i=guxWAsL5S#=%d2AM@7KMh{R_& z00AT3Za%vu49MaWi7U001t_y4uvJY@DXT6cEC*ZfJHO@azZL4InbPfP(xEp^H(ws{ zpT>~?%@1Ut+fXUj>B3xJ+^EuV64Jx7U4Pzd&&4iu5<2FN8|g`PrQ(2Kh`#?<66k8E z%|(T&p8X#H(?Bf0%&!;=q(8#H6a)T|We+D%fAp=ueiY+}zJ+qs>0=>FM++tB{(u!> zzB?M;TS69gkK7?h})4-I- zgChaKqbF`8>9x{w=0S3u?CZmOPg>D!G!)$jLtC$q&Qr^3`2bfeQbX4(P)jvDrdmuv zX~t019j-?U@Ao~MpOZ5Xv5aLV4n%a?(WJMPqu*K$lq)TTZfz2(>W8K6pivf)4heXl zjckXtEfW6#BLGBesXmD>Tm0Ege*7xzpp!_4{52R6%_`n7TzsPXrU6|;d`Ke-wpMF}`vBIeF-1{{Sd@0T^wpkDIN)x))_fzy8Z$RW-ki415reS`VsY zz>>Zf@BRLL`TozNVbA^gJ@(W5c%0H{>5QXt&Xj{m?FW}f6e)FHp{Mf}c?iG6U+2nI zer+5(2eu1ClHfys%P&|kJR?{L{Wjiapnb40!lWurZH0^Al#i$7Ycx@_qEAgKQCYLr z--kAyur7&>q>zhfq*2L{h_-73S+LpI&v(0c*uLl)6OSu*x%J_0bdH78*A8DN^Zi>; z)A|ip!%j?wI*bXCWkDg~bD)kC))*vSb1F4~%*y%cz4V*QlqzZHHfAmSe^Aj3eEm;i 
zvX0CV!SyVNFEpP%V5;LC!e#N(9mb#E6NdD37-ZMaVIrwBPw5kjCcJx}yg>fA-u`P& zw1J{&V2NJPSO6yH{{V<0^|juRh{`OMoxxq{_dYn= zojU&j{NDNqFrxW4_)~fX5>GKos=X=Jr_sZHo(mmH;gm3cUcGG3g!2GK{ceVt>&5<8 zBro_SpW`-SczR-DFa33IPhuf|bro-oM50X5_7?e6O{S<*lFC0ivO=R&!&veC-kBs; zhG*r)3=z0E6QMcu;kIpSfjmY~0sjD`&Z0;UoYi_3+0fMa^Xk?ueTUMCBxz88)A0-L zIr?v6ulyV~7i=%2Nz7UMkGNyAmdmElO5&^NO1KpIg#tA5%o)7k2~SX<6Hy6p16;py zv+d;ChLG^NmAY(ZwhwQ+CZVJC3O6^Q$nUpbIr;vqD&upbTR%pnYHU3cSj|WF?EV_~ zzw6;g`WkyTUd`wYz9I7h7A0ZD`V>lVYjEI+&!_j7U741Z=}LX*@u+B!dOg4Enm>`m zvF9$>bId>*39ERuS*}~@-4am~bj#e8?k({-qJl9!LLzqOUsxw}15$cNqW3%Jco+VI z5pz&Sk6e<4{{Sw7`||uD{t>66tnWCP=87D8HCXsSZWi_*hden{GzkXGy!5N|*vv=| z_bxpw@EHB9!Yh9|@e>qJ$$dgA>JjJBseJOhc^XVA8`6UehmZjGw)4UdJ3cFP@WBH? zIYk_`u2Wu`S9?}^P0=5kKoTPUrCEFyw@szVjuTcOSe~2s<0e*>2!Gi<0*6m9KnT7) z+X+ul)o^YJ^@M+~^LYONW2!-q5%rDOim?O=qxAUN$K&;2uF%7r(9GtgzqZ0*;ubt5xU^0}y}h;GJ`#B&D!w$>JFwHf=#hxvH3*r}QRFYR*jTK;H8Y_|;sla#Gu zayRndczlyjLwnyl0lHDL0^8eH@vO(8$up);<$p_D_3R-2bxm$!mC~M=o|t~Trj2>! zks#f*`-q|aDuNvW_ZY{V`?Kx0}2tKuMvjbI%3FV|b zmlwXs={+7(C_hGDwxSpMH9e^`Kb1*F591cXXDj!EMJ~$yVb-d7$Lgf|cc)b6{a(D^ za29xI;&&Z~$-wJ6YP~X3{t^^nQuZYpMK}BQq@`G;eIi>PKCkr4Y05K>9Oeu>jYXad3yJ6WJokLrALnyH^b)(7 zRJTx^1dcMg@VZ`{Lj~VL!Yj7*ROgTb|jErBdkQU-;DxWt&|mrN{{U+U^(x+QXoONa-)66l$q~Sv zp#+qotqihtNvtyvy(y|nozjL?Vg@vJOH(|%yLl%V0Er{S`}7&4%Gbz4Ti&UkEgo50 zqG{7VEV7bKj}05ja&DGHmgYV8sCp-tEb`>~+NSiWQKZTP(dA}kV?BiF(HeTiN!&Cq zovHK_aR^z!%D!tRp`K=skCkIXa7U?rXwN+44gGS-rO;t8@^3l0X06@qfc)t{p;v3(uGkA%=l=l5 zWNd`{#zvY~I<_rmI4mo6&YpSrf;?KK#-sjj%tCP)%KRSFj={l_GIE2hqNp~_bi8Wt z6l57^x<``LuPsaiS92bWtnajy5BG;U!}o4`XIwdj;P_N?jmj9mMyX^hvYv>+zrIrT zdJitXEfU<^)!U=vQwRamrh4$RB@xQ9Cq1K;07obnc~;!fQ>Xm|DhV%@3lvN|I>7oS zsA>oc5(cFjy%gNs{L@JSe9liX8lpsvLxQhh06{yTv#ha~`KX`j)p(OrO2bN-mfZUy zdH(>}Gq?VPfw@;*RHMq8v4e~Hn*CFQ$cn2iiiT?tfES+()3)L1mmhKa_wQO^u0KC6 zF=6LEhhcu#(X&oEMR>Fb%C6@|$q66&dGD&zlS$&( zgJPtwy7s%#1KnfF))#9feH}{AoP7=}HI9s44Qt%H2`y~i_+9!8$={x?10HumS1(Do)}?)>3qOlXIa~3b zSJ?AK&zG7$!vX#nSYh|S^Vgr9Y`>4XZ}0P?n-ZT^!LcT9W-@s3^cB<6%vfrkFyV{= 
z8u!c}IK&q7HH#vp3E{;bSwc6|VYJ%~3p{SH?@WvQkF8q+Yha`Q070CZHy*(c?u7mo z47yI5htH`00IQ%k=17$KWYP!;eJr0UA_wnEY@4>BCSS~@f6;Q)4s8elZflGfr-X_* z8ZnDwm>(Iq*@m>Sobts(L1Y#?^qAw9mVF%wQzU=;XyjbU_9vg^5vWyC=I@8D%DYum z;5*Wv2ztnWK-ow7=Z*9(P+pW%P4tpasPoRXY&8syN!waV2=02S^Ul;IR1v8XXvO@~ zfIoLwnPR%1G6R<1^zAbVZTAH-$p#&mDre4==jN5i5t)FkD~&MI{eR*Q{{U~#Y3Pol zM-f+Sn;0z~1cZ-gvih2`F&yDb#iS@iNfRSsD{)g2s@*ZW_MM0Rpr16eL9YE(O-7XK>sTtFs+WG3jjpDGyi>$Yo)nUxZD!sZFm>d|(3AM7zZ<%wZME%pw ztUp?Z&|STwhHxRx+9vCj9bAc!=lGFZd?(RUWv&(EB&=WLjB|);zq$8}N4uU$Q$n$& zmLF5H^P^Lh@P(iE{(P(JpPYKHPAqjEFtNWlbv6cI)bL*O{{VC8uzy;9-CKQvuk>2b zsN{UA;|g_t)7~JGo_cy^Hgt?Whp)m|_wpo3^&W7iM5;Y0{P44WW`zt~y(g+mrlnh2 zO<202K+!_c9;$8jQEg=JgxTUR+N&AKkrTBo@8+o1LAgETYWX@#5`A4tq!`Z!J0G8) zOzi`t%4p_eDAunsRnNuwcI7Nz3M~B)5s<`<9x|b|I-2dxF0Zrm0FiBS=;**slVsXB zRm{k~P?S!S)Y=%1tNFcr7NaGyrW`;g;3|7nv~xXFdUfg3fUW8C#sUr7E`3&z@3#T= z-EZQ8gLhbj^elK8ILNx~&-X^NgE9E()sVuU?S11+)j_U2_a1)y*E4O`iPX!zO^+o9 zuBdYh=l!52?L6Ys5j#@6<~D-KH=St5)1O$;Q@C*n=eSAX>VOs-R_b*M7@@%uhB&py4rEr@MlNa?2@YXj3SO>nr`3)G*p z$x?|E><&Lwj}obOtbbuJJfP%RpXj1AnUxh^0j<@%dP%&(Hl$hGU5viEa^n7v=921H zr%F4iQ|&A)nAIH%{t9P%DW-cx1@%uTCK$86N{C6WzL>p*O;>9h5n4NqeLCWG1y!u< z_%nHXHeI!`Lbp)Vm=y-g?C>lGsr2EG$^$V{1nUX~oHAA$M3iof08cXLo#ETZ6aN5H z^g4P-o`+kWNxaIX)LI-6lN3i|-~zf2Ir)3{-OI z!IY-e8sR}d{TS|iSL?q!g@Z+UtI`)c>(P#JOrmuFbXa<05CFnM(3+ja%4Aj%2m3ju zFkMj5NLn$r^Z~r-Kq(irDr;t3fsJD-3Hq7pKRuddW*PK76wVDLrDA($l zqly+-u z86!`!h~weA@>2#_$;RlKrr6JEof>ezH~vNwlL!4sKJ3<-C+Su*l9_0mMwYY_?0%=E z&zHuK%df3H4=WOtw9y_SZ0 zO?rX%v~cxl90O3RB)m*lUZWCoWZIGTa9rxH%7ODzlP#uC$kjdtfV})ltp#zNAZJ9l zCf5Y7Q|1~4Av5NAe_cyQif>l+bm6tES2&0kxfL4)G`Aj(gvC$h)(bilwOi z0)z~a%d>5bm*?zPafhNpV5m1XUt6aUOIejRVz1R&jy_gRNW-E1s>*SoY9g2rSl5ka zJiWTs>3vrQx{D^)I;}v@PrcDLVyKrjX`;v(-yETl)L*+bXXXygN-GmtudBNLp-GU| z5}0tlwt0Q=UauylR5eb2%k(4_g6}QJDJ9tMGMKs1zkmZ3+64*y`l0(9I9bm)256lt zDR9G%i4&joepjlX(Db+R+kI<58qPeUt@an3$|Cr(zAwwt5YTQHdb3Ge*}Eax%;=Vvr!PkG7o2(vpeY4zwgiO^T=Hq$0c0-J@;e#ZyqSmWy9cGu(B zAv5!ZlO3sPgBos>lwz(h2)UzUr|Xp03+V8Gu~8HA6*C`sEnul^RLg4mv9UnedLb+v 
zdFomZJyldXl%jnp3EU4I@jy2BSx%%gw8x9iQgZLQ5cSYw79A!9o;-;XZ+G!9J zHTx!$wXl9v=GuxqBu8$Hlb4;I8;&&0O6mM1e>bLoa@I&{WLP4aYGI6J7m&5>&`HKd zYj>CXiWstT>2M|UV6*Mfiny0LbYrRFH88Y*j7$1*{SU=AW%tRa7Hck&NqTyec#_BH zn}09oe?N{n8)P8hqTxmC+`^ObW4gv3tFz$aD`k!Qs9vA@VEKWX8C-Ks`m(bhB;4<) zPA=EYvEj*x*jOxT>BpjNy?V57o_cM=8LXRnM2!5HR~XO3;q!KwqJIe<$x^*i*1RsQ zw>?*EZkLo3XN0o&C6=)~kv{t5Jss&RPZZ-;Re949>QZF$3ilfLBiy zMLM27cnw4Q(_g6c6t+Q!G|h2+7uxPGTOvt^nS5w7NM>@uw53{&Y{y^-XKK)T3JPcH z%zBiavUV{8XVtHyAQ>#!-*s5iB1QxM0JxlI^KZG&toZT!pdc_*+I6KGp6k^Rna}K8 z{IK-!~?D!iok>L(N%G5+>Jn@#~dMaE$3QNOEW${Q@1LzU@gu!~R z^!{00Wk`xciYGhNdB+6#>IseJreRw8opNQqa^78wnLHQN4K`%Es=xYSaP!V(7oesN zMxjYc-^IevT6!f{*#7_~Q%bYbJczG68^~R~LRZ0mP3V^|{{T= z@w=4&0JXCgCavO`zBwc9Zm&4>dDcH@eP4Xxtd(0_`YIT9m6jH*LxX4y@9RWI>m4w7=((!Q~&tfqkf07%3C0FIpX0)D1i`Ouu@Dd#%#cYVa! zBB!H@@0F9>Nl~8L=xCN&?wfh5B>{ozKiI=(=Dupkwf?yd<3QLoS3!#ksquX9&tX`( zO4dV%i0xQ7cdw$mK>q+~ic7k~4{1Axpi(-wPOgxjoeln{!V3tWQ z3(j3`T~I&`5Li`A>&;cm>EACtOspLqqYdl%^KC>ICl3bIhrUMfUXh6aIAF>_R17Ua7*zsbeyqDQ<-%BZA_SoX*N07`M{ zgPO9Huch00T9xt>M2)ikMwXThEmS)+o|Wsw>>|U#le1+Om7{(=}7W^-)dEVFAXUVdjnW zI^&=yr0&)W@nF%>aO{_-nRc7Ghhn*y!8zZ+3J#Cy2+fT1SX>u;^GPYc$_5+y1n!=O zq2jsYetk?ky-+T5rP04Uy;SR*t^E0rv&=htL!#s)n#E_DtPBu-;m(1f5BJ|}+;vC3 z%r1aOm2(0>%sDSW_Ddi~^WQuK0)92Y59RqORsrThF?QQ;qc#-cU3qgF*2FhE%~s!3 zjL>etw8lh11b^s<9#@rp>7)MuoPGZQxBVVA&_O~v30b&8Z zDJbD4dGloR-Ih$hsH#n)dTOdPtH`dV?uvA{M$W0yr(Z2l)j!Hx{{W7o*2wG#Fg!(Q-fb2$e-b|{k$nbv?T}!hl@^9~cqp(%n^EU;hTh3d z>Y<8TO|!pSiAE9V{NhCtr>-kg&mxTD^y^*EezD`T^hBVS0-6V?Q+-T7nX&8n z()%Kr8VlPU! 
zpT=iwN#!BJ^^~flirV@H5mLoYII2DE{WX0PQy9cezp;}BTp5_69_>h`Si++HGt<+m z^))|GkhF=1ys^jiLN8>+vX;eaUq|U!Rd*}f$lH`~ulU$y$e%G9nU5o-uz2ES^jNHE zNR&dUMDV}v;9pkl&j(10tA$F4IIWU%{`@?~He=+oo9aw-^!iFV$_zo$Ni@8-_1}q1 z+idSA1azy$qW;X|s5$=tqg6=%07)%YWNKRHy!sXq61-%l_ojH?lP9Oes%aQ@D9r!aPf0q9M<(%mM z0Bx6aP!=Tdp)e@!%t7^GeI&?eKD$*pt{uT7SX5uJp-`42KDBBLsI&i z*2S>$DXl+5yb3w2yrz&^??Lu-YBCBKCf09D15dv|FLfV8?6&Nytv-2vFFmVkb+j|M zVrBvJ%KrdNnbKHKA7^7mo6&;O`gh^&8?Y2Q2xuspX=V)lw+xGpn!WS~;d#uCIy?3C zdOErFMf>r8pe`+2{{ZLm`6i~6O_Dwt;F@8j1vJATB(5k|0W`_j@f9bbkX&0@-$PKi zg}RbXq9JW(O7CbdHcKOypi|#Mze2gq>l#S3w4F!3-M07A=e`>@#wtnNn|cAuI2Jh~ zZADs#eLDkYE`b;EnN5_>D+59Q03;*}Z;6?UAwk*BYOUuNo^mIXQ|VRxYk=#D^8kfU zF88UjBpH8*-axBA*tBszlF!j-1bQ|HEf-inKhvcz$s~`&2HjD9PfKI`NF{0K{Ry8d zTciazD1iR}DPoe$@FuR<)Vw)pcf? z_|T!_Q$wgrBs{KtM$t4n*w(X>^QEbfD?@_6j7U>KZ`Yo%Yh5ZI$@U=;-wkK|H_tm` zNnD^O4$kNe{{XfC#RS2^{8zSE4Z|fGE~xs+PC@!5ZB-`zBQ5v8tr?1iO=N&QE%Vfe zPQD+Xy>PZ=sO+hizA;*wvr%lH+l6QNR|3DT^s1@vK11DG??}_he}k`7{bAa{3hNDj zvWq8L3{Z5GicZ=g_o4g)`F!S`mu%*Wezp#FV=nfST=qgue z!2K+#Hh>qUjjz2KFJIS|zNRu%gWc$t51MRMRU=GA=1GZQ9h*Pp$HXs60$C*5GjhbK zy}5TqPzwCa{*C8P6s}uMR;x#_kT_6&QN({&`$nxl=w*?83>;T- zSc_|tJC$`R^RoM(7lu%hbD!H}9OjScVg0V}t$(otG}^?+TzRhNkh-#YT_SwE&YzQv z^7^#|W!Cx$=v-x^B--kC+9?quPzG*e=jQh~)d6GatM`4LnCGqo<@Zn+%_9*Pkp*zbRkzO7B(V9*4@e z*NlL6XRT(BDVYP=t}<9$`bd0nd-KzFQzOo(VQVWb2*QV%InYw+PZlLo*Q7E?wq%OV zRgdwVBmV$R$ueXfNMdX-_Oe*d1q&zpw#EMdkbj8rk5H6O^-ZXgqJ!wM4;77M3O=}qT)NU! 
z6}|6Rf&qsBkz;@+1eGmiy-G zDHJ60In{X%;I47t7yb9E=^S<-Z(J`~x3Y20V`|$ab{Dm}6g_W8Qu-UoMe8#g zwfX-5UaC+vNQn#s_-cQX+sc%KeOb_igE#-g06!4`0RsXA0|o^I0s#a90s;d80s|2M zAp{aJK_XFM6LEo&q3|-X!O`IbBSP^MQep-(a)Km76_OCrG_u0~+5iXv0|5a)0mdp< zZ2*dWN(m9qQR_W4uha2dB_sf;w|10*Qu=cV{k=T7$2!G4c##=F<{k{Dw3Zfhn?@C(2b>wOF459^z1Fsa5Ot##oi2Nju{i#hj+Ky?;OpL#tmHukR z84aQpBqe;H8MmclTMKJ2NX?lM8rBrI{yZH+VtPP`@7|S($g&&Tg!7t53r?JdRurSB z#BO6~pX!*xpoA=*GI*27SdTMBz#MJ0SW01AZ%?qx_(%{2S)KkF5#LVXl7yfn{5HPy zu!X`7+xY`cE3!ufd^V(GU(LmmJz{pHiFBDTrDrB|Cu(^>Kui<4+rpY#VFe^48gcs; zhLQ6XhbV+_cj;ko&3>f*MY5DAjwujqM1fD|%rkK(UkLZ>Or&K8N^!i)7qW;4n3MMD zSe!mUMDm_`?Mh3Gs5qT3t@I<26wWZ1T7k?FwD)!8^QEaT5}`i`@i*yCq#;6g^PmN? z^3$a@k{rztCwhFTYqxg)0DhH+8TeM6jN032eSB-N_%a0e>%+@!I#alJ972hKlt=?f z1Fim4h|&NO2brf*vfYRg2g}BqP|9v}_IvBI0#unfwer@&o6e*Z@PF+y7VG6|2sTnu zB|jmj^;nKPvXu;~7upT{{Cq3_0LX#+QD7r^<<6G+UXYM&dLbnPWzEKuO@rCykr5qz z+K`|L%%x5D9Q4wjO3;kKXDYPjHTzN%Bm$$}PhIU;lHxHzwA~^M^do!gPNaqA?gzq| zDabkxrD9xL&-3Hp7q=e5dQ+qKBQm}e^5xw{%#EkGQWRIY%{TC;TTuZ@_`kX;2Y`zM zdH3!qI$N9$qR>qy5Zn=c&hTpzq>_+F^8g-%*Jgwz3PIxB@3lj5nM0SIb|_lZ<4Pqd ziBDcYRu~esUcn?sThM92mB6F>a((zxpG$}+_!M;`$d4)+QlOKmBHhP-BThr;PzNo5 zw>nEIM*t+9FTD)dRH{&y2||)VzF8I{xYKSGDMai=heJqlX-XWP0>9ku*0H#>EiIwD z48=!ok>^EC%$pybGK3Q|2~qQ>*a>tgB#;K3P3ff$$x^c;Y5?cCndjeFg_R%;5(pjk z{^=rB(gcqh>~QN@eMUcTg(E_wr&HF0X?Ed6k*GYm`Orzez}hsXaQ&t!I>%^kHiAt$ zKYlf`N7AuNg=8CLA1drtlkn(obo~#?gQ!}N(BdTW)5MCzVS-5$mgYL#ovR0eO2VU9 z5;~viu`3CbfppGpjqLY zPVi3J%#Ark8!h6L{7T_6FZCWYf}muo6aKpkY3|gKGDgJt$>-9tlJL%C=qRAH(I9FG z8XM{FYFB8Kr9|mu0b|_zFCgh3lC*MlP zG#bkq#2-I;QWjMJT%|ysf;^9fKy{@kFDc4Khfpo@tT9(_#W9$qnDC&;o6elQip7km zNaTM10D5LO(l5${5TTzn>@W8E(@QPSGHV#>)SeR>k4UU7f<)yYTg=_Z;#kAJ&XV|Pv_O5`bDN*&kew&C51&1t6#`6H%O2n*Z zRDUp_dzBwgm4244!w{s;?W}LIg~9RCvCbuL04^Zu)+_vE28zYVgW=2bTm_j2j`pMs z$<`ydnWx?(g(iJERuwXf*ZAMMqN6GI-%jEbKWJ!)@;(0iDaUgZiHRUjeX3RtNw75L zKnBW+NS!<5`M8{{V>l))grWg?}H+{Hqq~bEX1q(t(7jM_b3Q;;@QSn3HZ4rsvY0ZB|wI zwC<$1gqw+tyS|me^TrdEBS0oKJ=xy7NM5HE(eVnLe6{ddLO$S?H+gZ 
zr;u|bo8?|o2IJVIVp|CUA_e9v5bk$PzcW~NK{Fg8Jn7QdJS0wA!4ag^9cTR1I3~wE zyOl8-az3=FE|O;II?p+#60|07l*c`{j$L%DNJ^Acn;V1I-%V+UGNho#zIEB9M6B?X zPDG1Nr!X7sr6EgoysIWseKz#GSz?dgZNEh2tOr;>oxegwOr6eBE zsUjLXb+@LzHHa!JgxjW@?kF^(lbJRNnagi7BYJsi0HXP0tPh~96M?o(g}r|Cj?v7^ zKoFT%BPKZxebt8_y)Fou+64aq%cp%}Q`$OmizYM{^tI@Ass+50o&@E(M?d(_F-R=3 zl9ed5YzlzC+|rcNnLt9y$QCm;fhVUvl%zt=qg`$Eq;SpsX~LqTa%A%+ee$d`K&JCM zAI&=6cT!~KI_n+B=LY>LwFEhorZj~_Z?V$dDb-0-T%|`TnG=2csa!xAjRvv3F9_j6 zRzMaX5QA?0;*#RqFoH?kOo84n?@G*s$s%MZd5Xg{1L1LKH}9v~N?wp;5k17yOL0tu zr7|WbeW%i$0)3wU09tjGBqhPp-V-#e?x_PZlM*0J=04Qo)}9oTV4U3c0(<6ymg*a^ z_(DynwF1S~^`gGa%uI+sN@c+?d6Pf+U(@AJg)PFNaX&AeV4ldX zFNAD&<_$hlr72|PNtJcJ-uX~egTHqwO9@_F*3GFJflr_Dcv55!gzw7q%7WWU&XRuA z=2We@iBmV$hDa7^)Cz%p^=*IMy zLLwlidRoGk96D?(4~ZdABZz(Gok2;NG0)}eNDe4m%-k!G0yL)CDsl%DkS(bsfgLs1 zYQ^9f!hVeJ?F_`S3doiAidbG*)Q%E6siBo14!?@XlbK7uPj;tqO{XdmYg}_Py)x5o z=5@iek*M1KLYNs|f+LXzipOOMc%Ga7YYY~UprM!=P#GkP z>DFS-$Un6-X&QIU=w-ey(u9|P*20P&)#*KZM8^7O6Pb4=}1c9N&vvT*ZE{9rY26FH}tGAvX#ywpgULS2}*H} zzHKN(jrcw>ZORc6xsqBJ>r`DU2ZO>|TMJiB@NB;owK&FwIDdr~I z^QY0F5@Tx5@D8+s4pfr>+oqJfM`p6f_uor!jw7;!_>n%9i*2&gMYxQqo}hq7siT=K z9md@U^;n+JBRH^B4-F=oOO7A|#h2_g@}#VS2O4tDxh z9m1mrKy~i?Qk2`FLJXuBr4WRv1(YbLjH@S+2LAM{6y;J>2uK16=QQdKvY4jTkwkL? 
z&S()ij^5&*T5|;;YILTZ;~`cG1e5fdVGBx>qGT6*fSA6O`fer1bF`p>K`JrG2;?tK zmisFZi3h&1Wm*3KlJ&U!kyw0(4pLMCvR|R{KW(eBExVtDgWvxE^`_s+bJM!NPyJmu zOhVK`id3PpU~--%Y1{!)lo10)fXzpm9OX$luR4K2LWVb$WZC~a}iyX*C# zlq^^i@nt~c%WIhD^d7BCILMr2AYvgB`I@{;W8&~bmCi9 zF}C0>r4c0;AefzUj#Y}mp`@W$bgLqaC@ul!A|!hG)4-&Fd;KX3N)ec*X5*b_{{SGB z0#j&@W}Rt4BOoMN{x_rrq{@@&NOeSZU;Sl3#Ce(5U90rARWB(vunJd2-_M4L8cx4@ z#I_qiP&YH-v{N8v=1>;*KT@gPN^+71zW~|O{8cxq!koq zPM4rkk~5yLH>S!Jn2;`$PAv#100MMqEg{39K#ki#b{7m(vgd_~s7W1_%FyYHFYIh<}WHr1F zRHoD$%_IR4)bB%yxz$rxSzw&!iB}2fsIJ*aKm)%~%iqe9Z7#Zop&tm&;u}wltswn@ulkD)#t~R- zD3Z!e7o`a=em`m7K98i_1M2r6b6xe(G*JN%ZMIHYDGTT4N5iwb}%dl!6p_Wdr9}CtEHl zCj5nC(15U{i)9mUdsY{-aVrGx!y4&7N@oK8A=F{8=`K3*ico+go_E?g_32-r{{Yjt zoJZ?zrS=_f4}o+$iBix~GqPnUN{_{vz?+K*ullj(Km4=EXc7>B7jgdpcli$Np$EN2 znksR?lmb;E)QrhUX=%!2D3jBdeL9IsaD@b-BHHtsZA2P4i6eQ_=dEE7+Hj>E4t}LV z{u;2A|9p;^H5`xRAK?(SBsX!h=Or0d_U#cVgtWw@VyTwbs>QT3$1GsNUEVIJe z0CgY&qBjwkh=H%#^j<3fDkq-){3yoti%HB@f^v;(4=X3Wux{lmX?Dz+PhMlcO{fu_ zK_uLmy*k3wvTjOm;0%1!Db;0e2q9G*$%CySDoBsvn|*p;N_yx6dqjFuV1owTPda!Z zA!H~@Q>5!_(+`lOkzj0TU^?go+CG!>q%}Iu!@zh}Ew$##gh<@Q>4YdJtuGdq@aamE znLsE>@7LGyO)oRL2qy7Q6eJ@jE&IGTrx6@rOp0kKS_WdN(q`h;+iD0?Zb}rZfHk*W zwehAuP-FaZnByrSJ#21z^{1Q}GJ)l&^3(gK*C3|ka{1dz!7_|85L~%%P2hn%LTA1_ zDaD}$L;xm9075~zu+USkW|a*9nB;lKKbq3+H|KDj?xTn4}V|HGTU+|)4r5a98_K=Jcivg^rX0? 
zrby}|>jtyt6F#)O$bf{4^!#Z`h1qgDwC?)FFdGUaBp6AV8krR4E3{rw!xpu)-kWe` zl_(hIXX{AgOn{W6l6Y;eIH1}CS=A!xgBH_cSY!-`r^{M%G{%I-l>Y#AW>5^=iIe5; z&X{>vj4)QYQq~7K^RvD9-S>Pup`wDvugoUja)Q;hLWk>*pCQga(rZHqMv&s8&Zhl)+ZvOz1 z*a}WWH?{90?^tIEN{)%2UQzX;cjP`^{{Xr}YI#WtL?tQbYu}Lf%9EXxtu0$H`3<_r z^{92yP`hh>M0so8D+btwNi%u3F0=Hfkmh0q(tcZCZR--sn<&%;2?t*E{%5RHWq;$e zi6b(fhla;De{2`b@E+x6-4r6dHK&kxhy^h1T-gq~*8?@C$& zVg<+`d)MboxS)0)Ol+gbG<-DLc4E96+0M`QaGgQK({FH zrd65Lr}*a6#-2b-KmY;s6xdqv3casBe`|-Z8Z$SxDf` z1S=^yL4!W|(fvhOh7iF(As)H9#r(Lz>SPkcy)G4Ijr4`Xp#W2jSiMKzgkux zAi)HKl_P{17B)M6X*@|1ZZ@B# zb}9ohbkv@em<46NYGLItvOpI}Nu8&EPg+@V^(gSzRv7kg#-&Hf=kHFr!T$iR^@oD# zGqShVO=6<@a`dDaSSj1f#+RK+P2lf3Xer_%#9OT-$4k-{rC~}+fK&nu$9*PHvpbRv zC8UHUXGJdY$dL!Sl-of>HcvGfwfgzc2~JQ5RO&=3KGd}odRA!HvA4)&Bg@lq;>T|jO z0F{Z3k!p0RV=RHV=6m9hpa}*=x>HRhP_UuW4*hnHwU1dscx@)<$JnOU9&9LBjuj2G zH>9Q@R~86FR#~nXZTDM@~1*%<%u&rDWy(;o9%t#l?o*3Cf(G8kaDVFX|e90iBU+o z)8Cyqgs{*m)F*E$W%QM1PS+h3&%o2psl^gZlX7~}mc7k{$8})_b0*+?&)S193W|g& z2HdS^qJX6OatyH zpny%FdG)T?h1*Y<>)}n}ol*j1fjUOQ(Lt2EAYU{jb=32$Z|TmthC-H%ix(r6GL?hh`cqCR_d7PNu4B*}^V)ssj?9%G-6tp(?iZ{(>Fk%CUEZ{%7cP}> zk9?^nPFaM->mm?{Nz}<7T5vX3F@3fB&|0McUjG0?O2zSf5%i#-Q<+D*sXv(Bn_;Ud zSCR0^vbWMH%r9iJ>tixiJUYbRet!3)IENLKe-bC5rMH*izrVc}VL6s=s3Xprcr#*n zZ+gb7v?xdrk^+Z}(`gx1&I$M9P4NToN?TyGuBI*HwSJtU+8heBC~MSOl(jA7toM4G3u~R~xVnZ}lZLl2)4<6!Wi z#RVUyU1{7HLi26TpcMnO{{U3NlxKtxghv;`WRJAb_|Aj7LETEqLA~IQO2TludYk$G z0CoC{z+#9N8AslhL8e$*@~A2WG{b2Vn&N!xE>cY^EZEJfv{FwAf;2w*8dF7X>A$5m zkQ&Lhv($f7((TNBe-(&XEH662;SuRcC6y&<7E*Qb{8wU}g-Xy_Jk7tlb<~(noP{J8 z6_qI@ohV^*suiLdOeB0sHy7GFtF!+A9mf*C^)cLTDG5+aBxWZ_`FpiXtfw(W^u55H zXIqNSSH!DN`tKH`DL~JaFi=5|MW#et`<-dfRPo6GblUXJCBV=`Ndh>3F-yfd+7zhp z-fIYzxtV9;TZ&PSAH_KCMHbZ@Lrz9u?bf>_B-&u}rW|dxc4|BeNdExJbqBkqy(!e9 z`3&3X?rXEnV`^(N4ha&47(NEa*1uD63dO4}z{;8G1^l$8(zgf}Y{b6~mTA=pg3IE#7G&Ns=ojRhT;a`cpEYl}E~$VM~axCP_0MlV7U- z)*c3`aVfVepS2-Cu#k01K>5>{g`~uAq?xhQa=+e|Nm=1HHl|xCW4DkL6T{A8-TTd8 z%H0kh=Qks96V|^`3rggH-?@MU9^u`sC2C0!ccr|gv5=%0ROW*{Nx#yB%xy{_aNkNx 
zrL+=B9D&pC>r5$XQCx@yNb&EiJ|iIqm%e&dDbyC#5t&C#qS|{j+p0-OZ5fJ*jHV*m zo}Jg7=|~GGQiQE1kG%c6>k-+?QY5RV!Y$oCwBeQb53Mrk3WY><{`RL8p+Ozr)K($I zfSJrQSRO}z#TN6eI^9FQ`kL(BZO#n{%0|Rkbi7i4Y-Lh-i3WC$oqyA8l|Ji=0vrBZ z`J{M9jMwYUAtX4K##0j=PruD6T#OD>4xMN+Tu>rqBk!#yTU>e1J?V!m5;qg?%CYJM z6euBI>tiF=T5VsrM$-^G>k|mD_;Vj-p=6a0h5_!r)TbBOx5Q%SUB#xqO~rODDJD$i zK2kolkHi;T2vCh<$MIPpk9ybXtfLV6TVKMd5Og9H_^fj(!Mdi1(NdHD02wh&49f*t zP0cAvacfEVLAQtBdc&bIM?D7AmhzyGFVdb;ly74iQc~nekSB!iO@nf7q9l{4wHKp0 zLS)`;)|30DM?Aj#=%h5bvA4gSAv{yzSj8b&R5lUDh*4Hu4aCmV-=tGOl~`YDm8Dsn z(0ul(PA7&3U!{JQ;xO25$q4NnE-gkrA_)DppD02k0tQN?aNjJ1><6VlBt@53= z`%+K>p-Lh&y%LrYnKLGv1?A?w%=wzk8b>G-%ifgXTdPxmaP8bRJ|eKVcFKmC$(_B; zV^Hc`DpJf`1nZeQ8PI(CRtb61w4O=xgG)+M2u_|~YQv>nqSc!!Vh4A>r7ZT3Em^Y! z@PphVO6A-u^uj;LZcGcwFT+jY!U+d%e!5e|pr}Jb{zQA; znMu#4{{GaLLK3MH8VTB;00kj@g^{hgIkf0NkC@7z&&YMV1Vr*xJ zdGh+vDhcA$%wPGG|V5U3nG@p7?Ze?yk77!_yFDX|@1!Jw5w5@g01dht zi5>UzrMiZDJlXQToX95WIbc}-0PQj1PAMu?jgQuvXf42$@osyo75fP!CNCMf`|C}- zrRD_eO!p6Bni9F9;7+{2rMm!uY}@O&v{FNfY!yHp{3xTeb2sq=Uwus|g{?pF89jB| zT1dzQ*js9aIb>WxlTJ3EY(jW|jbi>9($kqea`~92&|1n=HNWG=l1W30PYH<^y}J0; zWvxRxiR6Bi<|Fp9gRe>m;0|h_e2wqrYH9*xi|MU=E{;+=wX8!Kf*fuNV~f6@Zzw7f zFQ>niVL>^J^0&TpwUBdhNjnk4%F{v z1@*bu_1{q2!9VsSwMx*6UrZN&Hd3)rb#lX?npw z%#j40ug+#^w$xLYAxD%Xntu-3*iQBR zwE9ysb!CtQ=_6XQ-V+h5ekt@IK0vl;P};{>={7XNP=!Ppw|C%E@d#9l4q$tpwEiQ7 zJX-6;-7_Cqru#G}F`h0)-185VitR6+ZKheyG9tv4gGc zx8j{sy_fK$23kWY0v44lY)~ZllenS@NZ{D zM>2X+!jy7fFrNLd8vO6ABNB!i$l?H5h&=uKnp@~DC`SJPI%&8Wwcgc=buBWr{{Y6r z{zp&EVb-$NZAno#{{Y&%AQE#cQRFT4tbq$sLW-{)Io2NODVUocdmDJrZ!3TjJ{cG5 z9(9OhsI;&y3?KY%es$TYOOa~Dzm#N~i1Vh2a>o7qx1}LVNJ-%XE?QD@+r!F`oXbji znp`o$tEEOKakZ#49cW~M{{SU$^Vg+f5!j@Z5KL@o$H`Hjes%g*C>$!nu6tW|YA4UF zJoaS(1SfuTNOYv61o-^xJfwwKnX%TGWOlq-=G(rW!!+8G@IcKOm)1}9PNXS!0!SJR zzLePsT0j~b^yy!tI^tehm;V4|fA-a(kuqb#v3R6B>`jsgS9rJ5mbQS9Mz>7Hhg~%q z#B`nM7=(o>#c}GX8pO&*Sl2;V!t3Z(B&47rLn~&)APcT~+n=eVFFunb8e#Vn2_)NN z%CYM0F!Cg2xe;Tx&h>x?AbjAJjtYgKs*y<6&(Kn7M9lg)6OMXxg#odJiY5`aj1{wehk;TduRslKOOPW1huSV=7D-Ez 
zN+2Vioa^-FN>)_A>Smrt7_jL}XepH-fy9{Q@~0eD5QKx7v#o}L&`!H^qVahnDYm|~ zkv*C0_{xsITUTe5v?*lVfiim1l!n%q604-#^y|))=WBzE9WPyxQnIH} zw@-3wE>|+^mwH;_InqM&1%srW2Zq&$M69UklHEx+^66^C5-}G}w+l+g8)o$Lp~oCf!B8iXbfsL8$`pDD*Seg|{y8AZHSj)F zn5x3CM&@?>(pB0nL6|X1BoBtsU#Ki8VId?%=bYI2MPLewz&{REN_Ue*D`kJTBJ&+7 zL2hBPsX!>GTa`G{SZ_0zwZ}ibVXT?6QAIl z^3vY_02Pl3RyI<2U<-V_DKDwV5*4C&QagYiwCiXHQ>qe2p!XDBTJsbnz#tgf)Yv6s zTI~Q*a|uXS7j-}Tz~rz=8G2l6=S(F)kgZ0v7NX%oU<{!?rm;DYxCCls^P-inKul}h zSXDLxP_*@)X4Ivk;i#T}FWZaT=a)-)Rv-5lY}@ayUzXRW-$!Bykg=|9E!;`;@T_9{MWz5Y+ORqMQDb5i-_y!z@~q^EzTed+ zI-5eEpM;Zb{{X4^Rym^cX*xi#=mxf(YY4#)jX#e#`BTVBSE11Qh_w}|qO=nLlM9>d zYo zBlW05ZV@?26RjIuD zG|B`Mrfobq`!pL?Yi&H~q^~{PC^96DU!5vRL_{k%2e|spAZit?NRKV(D2*-wK2(*V zcw{VC(gIaJ(EnQ zu>SzM6)PvkblYDFPAdFbixNHSPCA7(@Xhz6gx`fT&h*h4rUGMeODT-npPe_0S7fI0 zrtm~h-kMW~F?X(BQLv>x5@G;dz3bAH>2QrnPyj~9L*-B5=1CG@D*APe_W5l{PwxR7 zGkMzcNr0r~yleEVTEA!=?QO3gn*AdmllIi)r!a}sbLGD=U#+RX@;Fkg*+!%hd)fev zcRJDrbb+AK+g2eRm&MD%Ca`Zb2|6u4M0@K-7PYw`i6#%9f2}xy*@c9_@snxtr!go- zQ3NPOy3;F4!if-K-c;IzscOmqAd$}JO2-Ber<*BtQfXmbuK_FPK9z@NF0bXUjbam#-h_zqy*#@_^rlxQFIr4& zB7=F*MW=7AVViN)DJl3txu+Y@6%hk!Ehn{BWs?SP=k%r&(N_t$Bo6(km8in?pG~Pu zqYwLpW-n%s`1`#!LY6j3H;#!vTE&tfA!LJCV7}54q9ocmno^dfC@n=~D5h0=1En`? zuw(kHVN=c~MS? zj%uQ=*Z_p@3e%_3o>RDFqacaA$oub3jQDWj8J$vYd*qLuG&n~&7V+}+q?jGmi_aO8 zYuxnqDN4?Pn*dr_bwHR@PlAKMX6Ci2!?xQBP-^D2(3cT5YoG z_=c7q=D$*BBgZZC0Cv!8O%jI58iJ~6POJh~1vQCV&Mp?YPZV_NO*N_70h(VE@N5ao zJ~2h5wEH0^I8g@LWsQHj#mX0Bn|i^OC?7qkElObrOP!w3t@29Z@B`8INdWzvvb0`@b%vKG|gsEIo zGT&bc#%d2a^sm#>OsZ@R#c@aNr&0#J)rwn4Ya;BTJ!xPC48HGJc2$%s#+_Kp$7oNj zH1d@8RNi$5kff$un6BM!=FDw0krA!=(;BaN&{iNUArKUu1t?}pf7lE6{{VE_h*n!~ zJ@k~yM-Y=>I_XiBY6nnlK+QToZ9}CI#osj1$(lLV0kRV7nuRAKdKn|-Pxgsj&kY9B zpki)$)9$Uc@F;>o2C&_$isB`Dd|P|>dU3pnBXVSScT;6aP>p0(EawTIjyx}VT!Lp> z!n6KB$)=`uw9=K?EGJZ@QRH={j@clPL<3k?OCe${HZ~lJk10u zH9A<+OHv#{B6BGu&)%^zwo_pizVolYoe9Whb=Lg(>!k#@@r7P=QRS|*mBO1;d?zta z`+fQMig~pyr z+&)^yVI1AS-db^uMhsDoRnvqyzV)r3&FQGr~WN_h}U3MoU1p&?&HD0P>`? 
z3j%&DyZwABNK%9)G4R9*tRqf1wC7E;UPRmNSce>Ag#z6oVxTah-O7@9&y`6&lLtM>}V@G_7JZc;_Tk$O)PK#pRZ1uDNktuhA*KRM-3 z#6naA<-dje>9NBG(KjL{H}R}UPDrr|JM#n|l`cwQDY9=9t>%`NaV@2_@Gjy;z{NF) z4KvsdNeS*vsm?XsSy4CVN=Q-)M#;4#6p|8{BU9t-#bF3a!roH8A#r1rUtNdhu}@%e z2_2C@QU+YxgVbf)sEbA@*Z9#xiU;5#sX*XAi8tzfAZ*zY|506sLlDiT{#oVuW@ zF9-4XtYVIIr58xyPjuSUROIWHI?@&to9q0idSqTxxzp)QViu$>TdlFSWx0>vdE|punraZQdI#XdN66bM?Yo?T!@Z|+IQj-~Z)6Nrz zG1i%Pve!awO2rpQwZ{xv^jJ%(1!FHm^QB?Zz$r{X7Ve&U({FZ3kS6+Wee|Yrh*Dhb zU>}Q4?jL;VPbI~|ZM<@)RHUIYZQLm`pkq@bL3z@U(}^lK1d-@zw^p^tjL|CY)5A){ zxZ*faVs)$-?HUtvnmlbzG8knfi;<*Xr_zd2qOAAV>!+nS;lrc`)2}h_!j!(zWlDgf zW8Jr=w9_qy^AVcUYlF%uwV_rPGjX}rK;OQX1DbE4v8I}AfzNdW7wmi3K4 zw%ps4`}b;S5}3F?G=@}{Wh)s(D_%zX@~l=My_7aWVh1AvJ$du1lqn~4{r=VdOO7FI zrAbVcC3oewnA4pj3ie=T(l1zmTX3MLDnQWbKiwAGN#D9J@4~Q*p+a7pf-;+GIUUZ_ z`0U*&7Xe4enH7gjsS4&SJBFQy(N&mt6GJkKst}TGnpmD zf;JkNk1ljsFEiThFjdOGPiO~2V(A=n&TZ#4j(jKt0AO!!L$69(=|Zx(BH|)9Aafk% zu}Wm6HT`BP$1mff48maBq&>Ne_B=)w&K^C1gOB~NtmWdb)Xv{ED<@RZ?sR+oo&qJON`S3 zCu7}6&DtSFIiWEg6yQ)vBGPq`pAPHTAhOi;xjCKnx?M4q?W zdihfE2q{_;GV-8Gs@6~x+xU-9xK0JjpNG@I-1m424qb*X6v z5@W;rr%|0|b~XBgI$LXXpwy=}<1hC`0ZKWO&JWkdu5zezlRunC-hW**hS0 z1LCjoQrYB6tviW7D^PDEV{m-{wK@hw$dFZ*{bK&K&*{atl%sGFAos;)$Yc>4c~bEm zp&-fn5H{FTWiuT0JDLt9GL-EU>%mJ>Nz|*t^@dUyPIJY>ZiWxk)^erhRqju8nY}bz z&zRXW0BzIdOF(r3@x7>&p%AU9u?il9xh~6%ZpW>C7Sr?x+DXrt`n8Hi0Ap02J{+h)f?HeCaPZ zm_k&m=$Yrzzez${!De07xhJ8TdIOzJex`*>WC;M@xQcBnC*gq}{OMn}A}@HNrLH7J z_lJ{-s||+i7E-3woaIrt*Wl9k(@BcNFSL6!#3;b#CVeTaMj)d=l&9fY(3sz+ zm1oS-O2|_4A!amz6A{b-J!X}UKsit|p2`V2Ng5F_diaT_SCu){{EUI5dx}TzPG1^Q zTV*9mRp;NW7e~f}YXW-D=9ZNy>Dym8qLPq6(B%>3PJ-D9I;+E2`_l>G;x_jc{y;;4 z5ox*UYy0n~-9nwp6*nR}Q^BR6C*tAZ+bI|KG*E;n@ZZLPgoClatu`A{4rGF&I86|& z@T*@+!r;}3K~Qi?PK)#dtkLZ|K!r$BRUSmvClC_YLB8H2^H_D}QoHLAmFqGln}>3s z^!(Nxc~F7LAb4eur91G0Sk)y#0ZMT<)f<~19V@X;9E^oZ%qCZ@@Ab7iIk=69w@-7@ zf*C+gbID#Z4=R1uijj~=P_gkG>lO-LKta4_tn0CI_P5%al}seW)2c%$xXih^`qHr_ zzyeVpUze>k40noCVQsjb@~YI_2Qo#$iSDGeo_xoD%7d!yGOPG1$Hpm4e~?ioa-b38 
za6VMEDW?UrwJY}?NG3NuZ#`(FBSYU!N{WtI$~`a6m~3J(Au6&r?wF5$bo)%8cQ>1e1O2dMl)a$pO=9wx(d^^vGibB~#lHkl_802Y5?Oa=nOr!XYLM`P= zNNK=QB1pIO@6MYkQ$}^04dR+xXmm}Wj$G@tN|Kz5&830UzJ?^Ntg$6LJ{0m^7`8&y znYD3cIwzp4VidInr8Wr=JB2t2O~e_c-2<2d#+n>5K@oX7Ry-gjwVg;e9W?sWDj9W? zx7wP_;Ry)?;XY$qQn0dC4fAQ`PO#$&0KvJ_-(Q_H6_p$(+|pAC%0x_(cfP+^r&ujI zNF-q1}hW#Q&P?#IH3s{(fmra^Q0+hCh;?qtW0SgJS!E3 zad;GB!gPH}TDe-@vqhkUDZ)uyNItaLRFJJsl+JV&9$?$wO2G{8tu_mZ>8SVTPWF=G z7c-&x(`>Aq!tu8L?Muy(BPs5toJv#2Ze8><7gVV3>r5q1r0f7uP^F{{OIe6W&Yz2Z zbWo&bY7-fbRbOpEzr&|l-)dTQBwYRJ9im&YkAD1X2Zjp4H;8fG%~^Pah*uS&M9&8} zGfF92t*ba7OmpX0yhmjKX>R`je)`rSVQuCg6lt}_`})%^oZNBCeNK}mnJ|?Vamh9u zr{9$y)7||{yEGjS8CdUX+aAET>E%UwDGJvpW~G&zzJ{& z8>9`cPkGk$+IEX7NQU-1_v;>&jly>9hg&Po*w7U5Pd*tZ#8XZ?pR*PSw-7&ZU80j9 z!l2FP$X`xF)|9@}Ilm9_^zy$txlyqfpQoiO5Zah&1Bj&qFzN@+f@IF4P(RckuiBRW zMmr@R+fwsn*qPpz?`g2gG>`1lY&Nuq6iCZp1P%9)XHBBGX8lipeFX#|P9}5Bh}LKE zK~6Z1(RnnzjnAJdL#YW?LvgJkU7{k>w9~DsNg~PRr@K<0QlTkGOn)K|jNY2XEwg`p3$rF^kTGQ>U{aTxLH} zNNyWWQ}HH59)QzW_kU0Q#vLel472{BLJx7c^`}2k#A1{Xz9$x-rL12xaZKNCbn0B~ z2^S{)BTuzq?ug7s2@y8lC*IYDN|FBn$6egAj`mOlc>T?OuZC*_*gA6OdSyI{caBD4 zcZyC|5)>llYa$R-wH%;}0bplhJ>Haw2O?5IzGJUZ-Oy89B==K}rU^$3n~`hs`P1so z-2VWqb?ZsGKo*~^Fcp|LB+2K_o=I&`NiYo|Xv&~%$~o)zr8rkD$x3BfX%j&xX*Rr_ z`@Zzkxkj50og=VYC1qp~lPTI`NagpZ6`YiTm%B<~l2Q%8lj#?x)54)hNy%;>>q^9} zNQ|~L?j^_CX)cu5WK0e%qHTLt7ZUCrwc(O8CDo9o8KcCznXu*k)^)Z3076Z)=lHDXQBrcrT8Zpjr93m9839tYU7^iWMyhm#4-6F#| zk^$EK^{f)5G2Io3OpKuB(D{ECst<&6ADX>z61pommDHq%7X{OYo zL7cDzQj2I=N=(OnHsB7SbE1dX`cv@giANC1C}Bc^iWPl?Mf?4V{{R*Axt&v&dyc*| zIEe<)NVc^KU66bGz0D~ia>tJ)tBxY6ji1a+^tU`kF$0Y+Xr%0r)w1TfMtuoL!MYdZ>Phm4L!vxI1-{nqW zTYI+4Xpj!P%D8B3(5;n{xNYn z?aG-_a-JFAU%{smp2bc9rk3YUrB_s2pE^NWlmOO}+S^t47WL~!WX&RTYie}!v~RD< zDW$0O*wyqN-fg_c#=iX>`na+_ssnpAg>0|WttR@>W zoXV?&lUP+I!m-$#8+aw*lBs|8%3xYEto|oKTd&5d&N4U;U^>j&14I${hkNW2aO0sj$5G&eWIE%+7|@hF|Zt<6smi z!6pIbOrBR7?;R@|Gq!uD{{YtX^N4Apqh-mB1kbO&RhT}Mty9DF@7k5wmK0EK1jhEz z3PRm+DI_Tf({#?)=SvF63ILF9PcEHlpidf%lLK1;zd&tEF(h1#hPI7q{A-UihH`|Z 
zv1t$t-1H~wOT+fEt^O*(DkVSiip4rw7ESxKrOMMP*HgdGYGGFgsHYb#stL`MB`f{Q(@I5YEnt?B>bot+s>U_tnJRfOODX@AFXF?rDh@_Ms^c>a-_JI)ZxrC8I;JJ zxvg&fD-fW80(D5N!Ui(Qp_|F@RBdFp1*2ktiY!>`cH*A#DJ9A z0L*{>r^OvMf)6sK$SrjDa+zEq_aR92-GZU(bms6Q2^ z^vH-LSzPNPLy!Oi>T4c?g@RQe-_j|hg*4$&9|?|U+E3Dd7z^1%_t)v57a~Y8;i#;6 z4XI6zC^BdMQf8U$Qt2Y{BHX5zaiAtQ=U8^L6Vq*t9t<&pkMs&26A=b{xz-mVT?G6&6}L-#sFbN|P!KI_*MEFv${fky0stou0`cr99Oz8(>QrT_5vBo1W zjlPpbg*>hu1^sE&Lro+IDFROCzMJBel|&DI{3-9!>8H~5 zg(WV6HuboyP7QBjlJ9CeGaaHgr`SVDO4|@gzjZ0oBp!5tLK{+lkk*RFXOYckZz=Dh zfQ49FT1ZJ)R7GOrDslvraNffg9M+IaAs1mNu*ta<{{hGAA-rNxy{0zEtDw zs20Lz@tdrP`%_qCF7sk0I`_>uQk9V!Tixjd<};=IsVYZl#bNF$LZ>N^XTZW8eYDits zpZ?7(JC5D-+7#3MnuHOrTmC7`NGnK>hkj?--<3YHq#*!mWS*5Mk_J=A#yr&+~|YyrW3+cqaaDH)b_9e18)eV zjN>WNN%_}erjVx)tLc}%)8$RQoL~K7mJ$S!H>3daa&-C9*$HlST3`?SA_vNp2{4^( zKJ|_ZMjfWeFd)xckF8+8?nfg&R7Yq+S71z@?dt}@{{YJ_B~}T_d$c3UEA<5@{D73( zhl}5J2QtK7FIenLZ>b6hAz=V4_2)W@qN-FI$ ze-r6J1#bl2_wb~$XG0TNva7R7e)p^{8COz;f=@fmt!jCkh)ScBt7$t{6&bIvqTnHI z>mo0;V9?5Q_MgNQf_ak~&p7dl#k8QZthSRK-t?sAfviv3jpHwk6&WBHjjIgORh4Rq z)33Wy@adn34-*vn5Qf4xBHC%rf)E6W&FK_Vm=R348gdNyQ$-InPJ|8j?un$Z6jZDK z0LQ?ce5ptepfq(7oA2H4YQy00*mEj8a4a_Sic4vC5^_da;qIH$$QX0O12TWTs}SZK zIFhG7aYuQLd#z3a5|S?`Zt3@}Dsup^Mr|jQ)E|U%2h-V6*jTSc)4 z{@^@w&Vf{;FyZO0o6~6pXi@}-Af4*cL=ienedx5V)kKJxiQWZ&|HJ@B5dZ=K0t5sD z1qT8J0tEyD0|5dN10ete5-}4ZK~Z6Gfdw*=q3{$VLQ=88(PH5wGjf9Q29gytLt~;- zbF#uE(u3mi|Jncu0RjO5KLPe)I?!ct%4%x!v|^b?%$GKSW$}>*^~bfcPhO=nOv*C_ zc>Fy{J^%twmNV)b*?cU!g&$~DH9g%+rM&vDmOY!550^@e+Mb-Go3D?<6se71ZUO1v z;fggCbt@6H_-}FZ$F&d|H6yOy5<$j8E-SY@fG2OC>4b2wgo6~vD|bZFzy757j5AcE zwBJIqwL+~OpaS1NOeIQn5a`nJ6CXdGKEkx~wX3?6ZW6NFek6itZ^OPlpIp9WS6hY9 zNE2YCyLP7D6vF0cBz)F=ext69$X+UY%1|;qealB(M znyr)e(m=4 zhJ~WVca}e`kLirvR%)C6EM>C_^q{Vm6Md3an;$>II`#3j3fyJlDi!=douhB7;+w5W zLhxf?Vs$A7W^WU;Z;aowgV1UyK~q#X1lqv%i(lk1mHz+*NYQBBfKfKFgYk)+WuefS z=hft(O}F{rx@h~u23Qk5BfRw&#&&o`XH=|%=eLG3RQL*yP)4CXjj`|Z^u}tM>krHW zTp1mA7aMKk23Uz+#nZ!Bf1 z)MZlBO%MR4Y}~Q&oKUvMYk{{T3kg(+$UA*q7P9QxN($JOv_ 
zy+)|5OcZL8v%Gav+9R#+;fAZ2nv~dq5}>uh#iPRZ7_U*%j$WN}4%C#P9d2#&7U{9W zRQsB1Qe?EF2|TeMA~4l*+)BA8nXZ~mKqTti4~!o%>Ob(c%jPomUa+XNoykkckOre~ z5Ko}6#VStpY)M2$D?5%`k$&*E?c;5H;wS5;u4082qLkg+Q3X;rB-%a}!qcSHG|^T{i%f9X zn-7C39 zd1jsT0r3hjUrtp|f&KB2rwA%A0KD7R_r*H&nvF}HRTj&lr{c7Gk$Bvg@x^*oR*h`P zl|)j(O|5U{d*dnX164s*ckncb~7TQLZYW)77sYclcqcpfwqcYtvAj?yK&=-sWa} zv7arOT3Q$Uu0aOzAclf={{VxE&Q_LAl*0;YS_kzQm>b8fv6!W=Nv7X=My+r`<}w=h zI}zUatg34Udnqz}L;=&hPAP?4xoTRKVT>`ILsZ%h-hFY!Oy*!y;{D>c46GT8u$x+X ze9jzYovS4ZW*H(0o?}v44%$bYcD4mNS#Aw2BV6ht#wI-`dQVJ05Q3rLo0E97<#Y&g~zS zcAlqxg7J)@EyZWJk)^x-ys*!>5q;dj*2@N1we?^Xn1+02ua;RkH%04m7HQ zOo1B6-ecz-_87@g&^aV1BUIcwB+2lTu!8{O+m!g(RniX;SQ~aAkJr-~8dWJ!3_+58 z)6zejf8q!2D^0=B#y9V`PWV9Un`5W$pOsDdr9n3Ji}~X<{$Yw{;6(I~PT0uhOE`Lh z{{V%5OZwy5bu?EhIyRB$Kb&#@0KlqJZfa^9O9*Wy3wPMtz4pdmFQvZK$*VO|g9Qtu zuLu-i&pg`6)v|?;TvE%XS@RW7I11VV^H}STGAH-xjLj^&qSg|u>I@yk{)TZ!TT|wF zri3a$yupz^7WmFHCB&}qd=#;R2Y zR8=}uuQr(jrLDL(#{(@#D$J7AK+?D0SB2v9iZ$Mx04(t<)DTSkM0B<&=5MN~zg7IV zi`p?z@wkml!4OgR9}kzF2@urFed2&N`1kSndSfRg3q+d%|dSludT|UdHU9Qx$w08#0Z!)-}MUGo4j6J{^d=Zc| zc>-q{k*2LGVafv5C6PmMK?M zhcY$?D{E!U_Q z-+rFB%D7s32fd`9m-oe5E-FB2lQKH$B;t)NC_Fcxo-=jf3JuH*Q(Dz1^9$3iR!H8P zJgnQM-Eo|yN%AjA=Z5LSV|~dH@GyN3NyjRRU_(F&(3l7Z?Tp*gY{gY1YFcASn@K6x zyiPJzohdCEq7v;%w&f()^aIbV_r`FL!lmrpCvWeHS%uR~C~B)&8(7eJU37;=VfnQm zdtd`~oK&MimT&H9=#~KfAv+x`WR)c9>$V!AaFDG)(#<9#OHPurw=R-A49+t2n3m!g zl`1YEnGnPCSy)fQd__j4(i=#sS6Q11W-Lhjascw{iu6rNL0+nw(gykjB3=z42@e0`bX`D zXj&)TH|_Y;i}U07Rv&2H;xp>4#ygTW!ymTz#^McBHHcz14xe!Ry}* zMQSkD0`{HF`ge>k7`n^Hy81@Uzbp8B&ND9Wmv}co40(0MJjW9*ud^>H^9vfjyJLez z>$z&9R)WDx0VkjmM49Q=38*bSQ>jBM2G|ZEWsdx&{{Y4u(XOr_bUSU~;f$fDLs-+Q zO{8sW;8UcmSddGdLQcnc7|^s$YI^xztJDKb`^Ap^F3s}7*3uBD)IkyD{e~-76sQUm zkm_>JSWM||AwHg8LX6aO*CA|Vj|Pwb027LoRB5YB1>vIzUbFuICjz4k+SHRGpgK>T z#xn}^_N>by+GYU;(coe{juEwuRc51s%6AGum_4m;Tzf*MirIRNOg59QivC_wr1%|8 z+sD9x6sf-n^#<3YRiSrcIte1(fx|ehvvA0$jvKH74L>9Ad5?u;e8THf%BU0p)s*;m z1J@Kvr6f{x^3z-F7=;nx%{b3x`1w|+MWUllnu>3(3j#l#`r{)q2n6^K 
zAD$@!$yv2Zg?FbM+Dsck>C1|Fx2cuPokL!h(A|oVEzo&%##)s|qf)h3XBtS~Yj^46 zjJ--!+FO@TUFX*eqcDg-1D3}TrIz>a+xf(?Ru07K8gJXv)@KP<|JTW@J@5htIYskSZ*;lJmn-C0lT0$ozJQ z@ti(Rxvce_%!0{pb5D4m50LC5JMA3(6`_})=rp*tfOQEiq@CjA;8mI$&`%&FPf{fO z35;oHt_T-ufP79IRj8E9sD&z;XT*O$EPcf?wDkM!5!}ZA033T#k{Xn%YgHORgRnA9 z%>LLsNmLIrG{XQ0VESg=uNtSsb_H~TH@}s^BN;s$En7C7QNM)#xUZPuttNoXU)(+f zH;i^499OJT9RtHD(hzFXQK`t0Ac-@vxBmc_vhyagW`5H_(X|V*z!75|&E^21L)RHB)eO}JsYr$Z z{4MkPIIl=OwhTo4M~*V;wDTMz?>@e1fq5d{?eA!eRmyPlV~Fuhn^gQ-J#CL@Ej7tA z>;C{VPYZOtP80M7V- z{J%8n3C(kJ6Fd0&;c~|`%<6QSTN8U03Pt2>ci*-$%&L_rj%O+Y->Vpl+oa$Aa`gWI z#B*|*TAcZ(_r5bVo{H1Z($3oTTaGU8ER0Zll((5t5t<@$OsewleUv$zjZ?pn<$=mayt4 zua?*a9M`FGX`P&0boX}K!u@9moK&0ZV>79S!WWbn7BRK)P`xJ8t8y6FTnOk#hfdg{ zsa_cx(m@~9xsKQ9E!!26qljUsF2D$nQ3U#vibkl`j-^bE3*mCzrhcOM)LmJPuckXh zVKcQwCjds0*MW5h%H4L`6pcE-pLfH1-?=!hYNc=#7*-JiBv_LkKV32Qgr%aTQ>6%r z?C=ls-x-|EMyQGsR9{pJ+E;PoiBVA{gb}KD@c!O-_QTYrNs^EUiV8HW*!G{Hx{z`I z08f%KwF4%p-yYYGyHO)ZGybx` zd^vwyXA7sZS1&rs)T&md;gQH?Bg35c{c-;QPO7DB@<`Nj^xU48tjiw{&|<37k*M!6 z{M=#7QyF-2$SVH;mmIKN7~iQeK2wb4P%7p}NC7-WqCG!Mdo8Jztc=Qrp;c0Pd(wQr zwldjPO1Xx8=_IfFt@Y>I1Bj-x36u`ZIw%jQ^?;5vWx&MAiy`ZlITQ z;~RNnKCTKCyD-ut;0LS>Q{rVENKjHf@y)J(FV^D}rmbM){nDhL9#Q<`Hd33T>IBHQ zSj8?LijBR?0q1;IA96_iL~YvmQpGrHVodqrDL_DOZNH{73&88#DQWgVYq#mN%^}^KFjuvOf%1`SXZ`&EnvCI{p!c4@;pH7B7Clx)OE9Gg^ z*{RcyI2Y-2YkV>6{{UuH_GjAj8mYG(Qj)HsSZS!uaim7}x!Bqun(?7QBVUHs-^b;E zS*a*7_jR@Pof>XMwi<8gyqq;0u5}fOs2HqTB0-CtXR(hAHcp*43J63XT$}Cin;(es z#%izGDNQC~)ff&Xh!G|)b8i4VF_e0>)eC@s%$FOmBrGtSa+6`c`(GK39UP#eO8VJp zptYniz3c>ZoyD-FjXgtB$tqv^SvOns5_(=X7|Uf+g*G)cJ2R5C7!5^Q8cpKow~jMZ zRDJS?4FD zie-qGr=*j(ER3KZ9@wFldM2BrGSn3&qzRfQ-a|cKzd_d({{REIPzX9C-}@We8J;yv zv$ZlLZV5V!i$#pzdFdDFiZ!b)MxJI$RiX@|+g5@`!&SD4zWs#c^KF=_MP}5+b)7d0 zZy4B2{I%)5tw^85gn{6&0@-8&3sDmY@peW&3g*TWeg+|>@%8b`ot`Ok0WvVI|o zxsyVjL#N$dQf3PqU*kX983R)_n5n8-0-4?hrkEQ>)ATV>6I2+GBm4Q`>D1FupkSd& z>oFX{>u&x%u{Efwg{c||bAQ4Z0qd#07)niEg%+q{6gMXuPyD{V zLS*T<8n@gJJ9yt2Gg^Xhnl}(fVW?l@ib(Eh>vIWE)q%XOkOk+Jr_XUBaaB|Np{g{p 
zh>Kgx>%$HzJt|zb_iwx*fPTDjQ`##{##2WZP+Co@Z0zV7AlxmW+V?XTrADtiX+)q= zO)#^o-7_~n2lqHoi&)cgNn@-L5Ih^@^TH!b0S!!*V1FqW5#bx|aRjSrQN(BqJxfKH-X_7#$8?N0OU0Syz}^Y zh}x8GV0N+d!)4beoP8r&)p({s+gF#5)$o;5FJ6rx7;9ZkABGD^CvUud+4YBNl; z5G~i$9#9FM zo>~3r@;+}OLgz1zp@!6mg@c2$L z^pCRFGQ_P%bsRQbtlN6c@xT9kNYlTjp^BMK zXJdL)*+3@6$*E}_Hr3WGz7)kIUW}nv!rb4e#Y~dW98JURCgAV%e6ZFsJL4@>)B>aa z1I#;yM!%Gc+DB6u)l!o?*3v^YO%d;CCzDs#%Gg~^K)jtQnv?hGg%lq@#i`1yyiphD z3<=)iNZa-!2QYYG(QpB78xyjKCP$#Rjw$7yscK-;rdcmmH8n+I{K`z}Hjxl{hClLE z>1E4mqG%JS-|thz4a^bSz6NHKvH@;gL#D02o||LZ6*z7w#;UbIY62(SD<^vyBd7PG zc(o>=3#b6YrLDK5`ri1iO(Rih+K~cSbg=!2?ToI2Sv6mWcs)wXbsiY0R=*wF?$ys2 z4kgrrm5!f;9~)yWTQfqHTCH$^4?}<0oMrg66*Ce-RG;^0j8kfE{4fJEk~MWvwZGkt z7zLDA3EFMAC-=r~g*uv1H9nR$UFULrBNcMUb8lW`lgjtBY$JSWpwGmAc?lbAq#KVT z9y=S}GW1%NIi}q8RhT%B^nv{C+HqEcC1q;oicBB$`1*Fk*UT+SydV@-WKivLu^dKT zy0(b|{o@Okq_kF}ajO+wV6hP}SePUo=k@E4X4Ey3y?3xRG*e#zB<=p8zB6H4D((3a zXViM*B~@cOt|YGyhu~aRqvNO4(#p5v7A4 zA=3t;Mp}vuI_eby4=JcjZD_Fg-Xk8@&sEENMKaT>ojko8e^p&>e!MYOuq6unu{~^K zZyb9)HMXI&*!c-N?aPYHtpo@Z!z9Fxq<=npVyn~pGEMbUQ`e{wznA=DDx#m;nxe>k zUuWBtsxT|zH>ulUykpt=oog)ev9RrLh2Rdj$iy0skVoB%i-j#48%b{u*BRqY8l6a$ zs7)ug`FfA}o;pu!-`5bs+GsUA2t;)Ecx6Wa0O~&pGCQ*lTE6{iA(2Q1$o}=ZkGoy5 zLZei!3s{mApB?&mDL*U>vJF!5Op#-08+=X?4JyP)BnBs_vNwm9L%uTq0ODS!9*6~l zDh2me!Wdld+ol$hgsVx==;6SB&PB(K@3_a>6lK#amU{)>d7JC5DWXw*Y|&S3SFqop z?}V&@!S|2yVD38Mo8CvZrj;A+PLXLCuUmy%L!a|5o<5k*WtlRmSbiX8JR&~4G3;8k z9*I^m73zT}r~t>$qS8;8Y!`5Kps-VkNE?{t;Pf3Z7J-oW& ziZ>drWgG6(5F>a8nrrK{0pfA$Y7%cN!~wfdpn3S{tQU*=nuv*!vz^S5TMUi z)ib~A(tQcmRUBDFELaOi{9&jAQPckbZmdVE$7wW4Fs3sG{}cBtc;KoT$3^Wlu8V^XY_AxmFRQ3sdE z>~M0l>2b6jMaC5;wBXLu)KEP6*7%ZzY!pVI_IL>6!Q^Mr8P-v zWn>N4U%5VFaaWq3x);-@*VN;ML+v7@-BgI^AWWa04Jt02RFknkt?(VuNC}3lFAesLFPfSPI zVNs)obrlBXhyb5hzrw>A{N);SDYI!UQI?v5V5X>JQTk~HJYW&lSK2lBUe8)JK-Ze( zs3Vjhh738n+9#y?Ke1}32C`NasZTg|1punrLH8O+ zA}44w<~(rEn9P^gRIXm7R;skE0Bsu+5q;uh`F)lPFdUbT!+40rAi<|jpg&XeagwTZ z@SJI9y7-Tu;rZh@0_mOi`0tNxQf8x-%+XhE4pF; zU^1Rvd%$BRN2UD-Oj72kM&VCQz1iPuMEKzDVgXoz-u*C7Ge6=AXXGvMIHOP!)ahED 
zpv1=gOqlN@Y-Wga2?Xhg9Xt=1h#1Ig04vk?>u7QZxBxdNq@5!f+5zd1!`%iW{xL!R z&KdMhf;Zayz8{+&(9DIPP&&qL*4lG z^ulEI*LUJD*4l|ihB_9TDm;TQW2p&^^3)=qx&M9ho zB_l(tt%u@XqrNi^%27lLYdpWyVoCKFc1qMhW#&*WK3mQ+UdgQag{qOl)-|ac2^xR| zMX%us*p*A<_=#Z6T+73BQyaR?8Dvbo0T67TV)WuOz7g7A*x$k^Yp{A`mW?M;GS0sj1(y_S& zKrz~Ff@o!(sIY;jQ!pI}B!EEty8+=C`#{V%8D21^)mo4hbzU4VDKQ|K*jsDiQ|@Z3 zG$kG}caOlbbpa-93`dpmmdtBfbrt^UxP}EXjC9q74q#1$fM(uUr8P5DYF|hKJT3D1 zV>^1fNcLvY0ECNc=`eYL z{8QI-aFmTDU=3N)j<-cqGL!CVh~B;Y$h>LYduo5 zhMiop0TDJN9-es2E`n*fm!_z!4dg_IGjdOJzkE}vSDS|n1WJ=23xOc^KJB7*#&0>G zs^0p`NA&|8{;lJTnO0PZGtMdID`hSl&ND4XLolsSC)n}@2dVg!Rx zU4oAyV0v^I%Vg*_ee)`LA5}$Nq1b~F(@eB}*w2r$y*kKxhE?uC6EXGM6l;C4s~WZn zvI*VqOnjz2aOuRUPMtN-5Q3c`?nv`DBfcx=S0kC=me$Uz$W-$vKZ-5PTBOXL{xsT= zpyUOT0SE*E8%4Jh;n%)0lIu=aPu*TGaChs|eu6q}iu8l3)!nJATzkW}>5BB3i?nnx za6B=WqfF7IQm7$mu?2S(VT_*$0QdKLPCcN@{{W0|Re5UGqN${8zjlVh@`;}#J#b>0 zXddVg-Vv+id~s5hHK)W@4rJe8xA{efz+UAUoK}ujC58V02B5RcVnz>Xywl{(V%9#AyeCG{Pa_Ytf z?mIvqyyBQaQk_CIYNnjs!Nz8gYJ^iz8K*?s%jMq+%SVSwqKS6m9^jK<9z*K*W$ z1;0Uz^-zMHT2g9LaBMf9!#;x*s*`wNYaF2j8T{AV;nx%vfuuJK5wAT#2X5ZziQHnIqY%F9X>RK7LLEz$`+4l;a7h}CrH6rXZ+1E*_g0E1=Ni{9AweOk1#h+Jx@ zTC*Gbv~}7H5p(drTVpMmX5$K*Ngz*Q<&4c>U5@G*SGAbl@OpfHFtjS^tb>}9Qe#bx zw4DtQywk5-#E7w*<0X~gB-IFN+#?klkU{4RPmD#jH|DiW(gv$ZOr13lx1kds->-$y z%U4x8z2R>&{9{!Vr*q1n1?kcvCfEJ^aIHz}7mi1!T0RH2fg1=vJkE`Lps9izu%C3# zUbA7+{ctM1HcEyOIYTz71VEc<8-X!@3G128E@UMtbD^3`ZTO-*qpm8`R^ie{1S#eg z5N#Lrov}%gG|-EpgA>s7n8kB7Q~?D~efsI}?bj7@>r$^AjU)lfz1Oeyk6Z;61QECE zwi==XR*<|@rxuwkNc8FP!_j(~?6FG3Q=q}!RB6;yG^YOmh>$k4YyO8M?pWDbf~qbXPy^y}bN!pVP&Iwy5t1Zw}rVsXu68GAzPxXA?g>dqXg3a5!WQ zI$(jZ*Dz*g3}8}f=Clwvqg=p};OgEye7;!9Wyo_%urtw9h^6(*ZgN@{8^ z5=zg7&H6;)vhAPDH{n`wro#FbVm$!x?rk|N#TxaqG*+`VV=pb3%azh}Jz!Xi2w4E~ zkpPXRG3@&Fj&7hL6~SnWbpkDqX;91@&LrLPvg@p5OZ?P%+YnzHRp$2x^wW-o2T->1tBQEND; zhCn)&I@%-qcw(J4V>*m<7!(?ouN&bi1Ys{G>I3IV1WdS{Bc^7vxpw+TpQa9?g<>$zgt@s>C&iIiE^ivM|0luZ>_G} zh*c+49VRw0aC~DP&r?E{VKZTwmCMs>$=9;#><>-2`6*BC)qBx?Eck%|1J{pg4jFK%SN?{wqNPPm1hqpeJ$y$}H@L%OzUb+ygMwTTOcG@@_3O4 
zH|u@8Er*&bt`C1Mv*G7&JY>q!sEpE0L?mzCJ`xXoy>Us(8jA6EET8GUw%$IY6^uQH zj^01p6m!(G5B7HVxqz13s6Ow)ZERCV4aEF0ramHme=KG)l#5Fljl`j|mhW8koPt`c22j8T~afb$S4k=wtTAUXlAqT7Qb5j<%ig?NCyJ05w9Wwfb=lA1p8in*(tN_M9L^oiH!TKa6GR8B%0P2L59e44d}9*O%*t z2)@xS2P@tqV*AHyDzph(9_Ub8^3Q$#0(>$l7sqHHp zZVm|#+{EK7if9r{iP9I+NsnA=;%3s;rrHzYMg4wQDvXsvuq+sB9w0@c*ji+Od|-NS zin)j)gAP*AS~mb2M*H~=_r(NPU}8R}m%@LXI%S+VEP#O z;e!gQ>8Kq>3h(KPGfhJK+LeATsz~&lJ81kfxD&MZ^Aj=Qk7#DrofUit&86+s zsQ}zSk-wLYGufKoXHT-|R{Kp6Ux-ST%a(PjMv&yR@W>sck|P;<2BNKOyDVzbNfK?i zfC%3araO+fYZ#7dTs^&w(!JCf4BDj*v?haAQB#AM^JhnQIBWT zs?)`QHEn(RgZ}A^Rw^uQa4omd7^XC^zbN16Vy=pbuAZN}d#v;mzTL2EtMIBxRi;)? zVz{N68LY`gh#b=0KsFw;@Wyh3tkaoF$m&=6Sm`Ql)OFi__@$Sjm!(TBNdnWNfk5=s zehkflL2OlLR{sEAxX#uYN!j|4*4Y5c)MMu^{K$!FT_;<#I(zlWrwD^#JnV#TcwnHu} zmMR&95PKUR*A*%p##+8uMxjZH&fj+guc%lU{{W}+iVbQ^jJ%Zel0VFSWODxiN`rp0 zjP+erbm@Tc2fSFvgySzvgkpE%5#<7Ze=H)?8jVvS>__J62bA^3N;1Rj^y+FX+)}+& zTEc&Sqm0Eut0@NE`r?*quNRz<2iIIxs>V*^dF#FfWqK;=cOTp9&lPFYXKq65c8?3& z6!Pz&TS;Si-gQO%d*Z7jwws#;J*R2<^uo{_HX6O-@cJHj%Vlb{TJ=df8e2^IpP4@# zW-3B8758|Bzx_z~!N;;_nz@4qOeB-*iib{Ok^{{Qp!hPkdvAPfPkA%v*BO15S_H_q zuK4!#vwP}Y2p7Y@&mPMaYGyNBME?K{N||Ln-%tQf_rF|F``XYq)MMj(FeyN?8DLi6 z3jjbS0FT2Sz3{nQmTH}9)g7s{7k4L-EvS*xfxpkk%e1{O@GjlIi(ve*k*Q6?UINggAa~M9KNEhNMk7r=Zf28z4x3`E z>rkTP2_Ie&ir;6+<<4s|Nr;MRJWWJO>4GIpbIqgY?A`-qLsF-tIVH7w%22}ii3ZUC z?r)67Ua@p)Ybt;E-G7`^K|yr13Yy7|>Qq7S(;q)9aKRv+m+Nl0%jW7F!<9e*pEL;sNW7ws9*`T2Z;%}t8bNT>dyM>l(q<}ps8dDplx z^8WygeV`KbqDm~5>_jUsqkPYXKE~Oxm@8_Fq_2-g`D58sI10?nIGUXZ?V|VXzpqWP zTCSYM85ZIwncDsRahV)9>`b3izA09kv?-m0u|8mT`o2B7HPxc33p*$xFFejaan~7I z0)|GgsmWHP*@&{o@7o^E2QF$>tfghRN*dF!Vn5Od>G--^6ejNrV)LJD@>6S$6sx+Vf02cZ< zr|mIUCTNBG%A7z~!o^RiCg07UW*!*K53^-y>8qx$YO1R`Mx>^Rt=Gd-R9Wj2idFRG zDHeCBRB96UuwVpyysd?=Rp6AcA;)N+GGolnJ))+*Q92TgLH@1&M2veMEJjr|Hljq& zhNB-0DoK;DoM$TVhvpmU9sXW8tDLNrXi+rJ-qPaqvOQX1PW?oktr%w0s)!pq0(=Y~ zqv6vGO4IuA#$K9L68fTU6LL4c*0w5E;il8etLxJET8!0RptzROy+Jx{@WoO(^^!;J z@Wxmv&Qsy?h1Ast^vCT#JUQY5Vii~qFgNm$FH-nkxsM! 
z0PVn^c%D=1vBf9gQmI+FP_ULg1>}C1KGQi`O<5_WLt3-a*7F)rE4dNi;CyGM9maVm zRH0>ZxBRvmg{R%WPvaHybnDbmzV$Ptm(l}wJN~%)D9lwH;XncGV88*b;A8jL;i)Y+ z2m{1`P0Fby9wR(|PBLI}2>bN<{>{{SUk zuUcS$cbMDbx28RzO;)Qi?95IwnFY<%rkz}cC%~JZK_}slXuUkI3V@CsI!K5&jU!37 zbEZ$OGTCIUN}go_m8k4vW>KMqx@>(8EAc8+*TW>#Z*>765`Hmn*B;UKajlf&WuRsf zsxUyr(u9<%p}M;bBp-GF=EXXdkW;0YT`Zu_i6(t~BMV)TgkIk${jpA{&1K%>S_2@E zJT3M5GHyr4$PgFM`*Xkahl9gpc&dtNaX{e+Wr{!A!_-G^;nPcw((~Z>DSW; zKKX~>cm#C>0(E&syzxUSh@z!MmaKvIzMa>m2MkkKXwtWs7wfl7R?KlTBiXdn)TL6V zIX{HBY1Bo9H8J>SyJOqb#zk^;KQO5F_kYX^XY}76^#1?}@-+yl%}hJ+?lh#vtL_U* zshxrCII0ziu#5Noh{vTt^C}DrSIP`Cu~x<2SbB6q>L~npuEr7~Jzr zToLl$6hT~$myUO5BH+f$*L}~zF<&k#uPRFmEN)`aFnb97@F`SNqM{XzyZWE)k7s46 zYHEfFKzV#iZg%ZBHJNEueqA)yOwU1KW98EZx>lpFNF<+CZ;gg2txAc{jZNou3?s?A+C)c^c6Igk`C2uKpRHW zI23~tVq3pi#+C#koA~#i(-J8x*I_SV&<2a|@)NvaMLrc>H**GhXq$h3+Zox^<)EEL z$Npy?$5>Z=MM)q)AO_~z2qb~O!xaHQjtB-i_<2X`wldY9GsBZLI8@O{_g_HL$FKudT=uxXlLEvf?ZWV&Q zmX##QX-R=|e0x7BOD~i)DHevHw3%AWGzE~|$$dtD7UuJYG70g)t5Yq-txk0<7b-!z zo+SZdHsxm$FBrf~G*?E}8W};^q)d=U!bFav z6;S9>#0dnzI{|%$*bYq%D9iz8D$^GCNb)*P8G;EM?09QQm9I9YG|V~ zBZ%&Zsz3T_AX+4zrummEm8vfe${YP4PwECT+RZYQ44RX3y{2#ABoBrg{1S?ViMSNR z?)%g}SC580t(W_Ccg(#_6yGgH0~v*N)GDuSA>(Ub;q01imiEOeIdUq~EJbRdY#l1d zUjb?HIBeGqw1u#-I9DI$fw_Tch}`<&X`I!58}m~NNuLNkeireA*9_EYEkseGDVXVW z-wY0GGs?5Av;~g7?%zC9%2CTUR*tBsq?eD(Zbbh8@$$#CGcExF%CZ5S!P_3osh3Q> zI%t8|7HwN_*@%?96SSD+@tk{Jk1|xIbQ2Ok-TUqRu}@035QzkB{?I1=H^z9c+3C}z zOtnhPAyiDqr$TSxeT3q&iayV#g>*edmR`ELj@B3bF@3Pg#+GMD)X7I)p8-~ut@q^( zwejqX(`8>UO)_baj}f&^T2B7}(q=#-rueCz4|Gym_5gz&48&UACtxv`8H%@OBon4Q z{&vP?hXsbHgl{qF=ZbXRqbe_2mp|PA5M@q*y{v7qj+Vk6B1?bi ztfj`s%HwXg9j|(En5G#MMLK+q1X+dlnb^@0_`%qpKG;{%d1iBnzasB!BBGy+;ONw1ooFMyNNyW~{^!a728tu}+eINILv4^Yq1CRjScy1fZQR&_J;b z1ep3g@m)_5)z($DiI90;uIHq9;~|ryMzd6#fdjy|Ux)BM(Z@^L$0;8CnLjSLg*X&A79FZ$tDZ)u#5?$1i?g-x<24XmI*<`tka{C}p^=0*N=9 z#k>7^;)+tIP;neeZKP?LkvoxV*#7`H&1ihoWNBw-tY@GU=VC zc_;J`H^pF0bwZ}+-r06G!BA#EgW-(x8hWXXCy8yk9-qAb02rY_aqhabsU&n8M&A#Q 
zo+;C$w3%e72BVAhU?30sVmy3tnpaaz{{Rg{>wAOW$cz$W87g%KT)4Lo==28X$jFaf z_Q0d|bDqpIS>f&36mRf2T*|ufshvt(k=K`2->x%R+PRHLq9_Mu6#oDU^d4q9o8u{3 zrAm~-vZPVrzyN*SJCPUlj<`>K0lY9{MpG>pc|RJ&iDMY+#AL+CUt-JMcK5k;W0h$w_cd`rB?eu3GX42 z2blgbRhS)Q6^qUVcVbM)BLD-MR0t|^lEmB)7Vte|6~fbSbVkt@7Pb8`P0^|HohMhs z=>Bhw2=tP&>1Hc))<-G*Jh4MEtDJg7!zwjN=rob5^~Ha;<|~dHBC1qS0XlVRBbq#n z%*otN9ha`59c~B5Y-e<}aif@Wn{#>MnxDb~7q!L600`I+=C-U5!Q&4NII?l9|@5LgWz$Q7O>}&zRV@=+c zplK?SIA%q?%a6_T^T)JnQh#RVLRD0&T8&huHHBhI>6N^j+Si$mxS>HZ*(!AP6Htkn zk$IT)Ht^dOoYOZwmgZz`vwL*DI6OSHDsy$#)7-otlt;>N7l^`{AxYa&kJdk3@XhvYlkk7d z4WTL#wyVmqn9+TYEzetI>eZ=|T8vj+ElAsY>P#Opq#oyc;4T|eCVR)OGdi<%A4!8m26>qy+)?G;{)FKu zReoP|0q@7@8=k`hEWs9^5@MlwYXd3;!R;39xW#Ox9Kokjl|~hp-$>QORb`v^f#Ymu zg=*D8DdmWcfIzwV-28DyoI=NdZvMXaa%;kKFIn0X%F8y``M zG=eC1BnH%XfC{#o&xe)`dSfVto{^=r5Fj3HMo9V3t}@A$e8qQ`YL7^!^LdvOed683 z<2y#9icLqA`BX5sx<>3e`S!&^G?QXG&AvXkqgOGRIfGj$GW6-@Lgk)$svrkkjiPYA?E1>0p=#t)Dw3p2LR2$NOxxe}-+X5>dCcBvt7Z6& zLZws6w-n+zZSt_0J&qqCTAp1RNo80lcOB;$nv8i(zhjSOWU6PglxQe0%o?K)!AT6% zbLb#P(O5*mnaS&CDv(&eLYLnr;VUuT1*L z##=C^g<24#ERx^rV50s9Z(L-uzj?%E5O$v35AoloDhhPPQ>#c&UI$(6@go^J>q+X@ z)5Hz;Y&!!i56IzPi%pqC1#MCOqJOp=6-KJo6vngix6c%+aaC$1w8;_H{J{B)8mx?J zJ;R|;7G30b1HFf?GBpKyr0FUZRf;f(W}cU{U#0O*f@#>FJbP;=!_VdOA&FX^^x+$Ea9q+M~Gk)qzrr9I$>PT{{V0|oCDVxM{lz-l3XoVU0t+K>o}`U zqtjxe)A;i?^~e1|m!jD!bd>)9ip!GOh3&-}fNso^pn*Q{zf5JQRmo8m8kJ6IWr6Qs zp&kZ)7^jxa*CKNDpsJ8`_W+3W>BjL*w~Zh^gMCx47O3pD5J4q)Sdo#_7ysLe9@C9hWan0>I1BM z%M&ZN!yeJ724Q$Z?#+v{vosa)r&?i5r(VK&w;dw<38o>xk0JBtz2d19-v z%hS??tuf8el3Gve^7I+C{{R7(FcfY`{;*)3uV}_YLwiD<7WZpvrrYgyA2l1>6|z*u zS<@?Lc3ET8$%X5&Z!1m(AShE%G_^FQ3GP4>`{Ozl)T;aKBea;9p2e}3qA6vw3sM%X zO(h2Y=D+^{Em6~9-yY1MTQ4mj%ZTm+0uN6SjPa(F-!9GosK1KWl0x;|0&$ZGMu6oc z=`aQ4{t!II06%Gac+y>Ivp42EHBwJQI!D#gsC%I$xk$e=~~wehR(!1)_d^u?!hNupGnS zFY@y2FtwjZ6Y>}NVz~`coCsjk<|Fq$9$0#ma}|i9A5}O-^ZnQbqDWOfn@8!7Wzwde%BCyx zd(Me{NC|)ssF>}3)2UReFqPCT-*5TDRHD58?aqLDjZr>_e1Fu&s5w@rb*)QrKZO4P z{`kt$e70XGRzq<0JpTai7UFynfyE^$QEI3x6q2M!)wmxrC*g_&4{6XZVqJj;d#i}S 
z3Y8(C>pNOURryXmwMk!u6hm_o{R)`UJ|A6in#wj-k_H~~t4~9zLw@+n($!s55E%76 zrykA9a48*38QLU`fQbXvclqN!qlPpwO(u19TkrdNE-2$RgrTr z5Vzg5c-%?9oS##Q1?%QY(;y<21*3BvC+gVF`#Ok5&(*z3L_ph?cHa1G#L{}SEu|qr z?Wt66e&qU{u-tdcYEuovLPuBC??=wvy>XTWa>1(m_bEH9d$Thj^6oYSVx<#DDD^2S zjw5~R&osg5&=1q5GPQV&xoUM3@n%n1UHoHzJH+5+MKeQ)d6Y#y_qiV*tKz+SbP}MD zNdy%HPnXw@9%|(-+`vsRCTCadvN4fXpb~xRp^Z-3R`%)Fe0xEawjwnGBhw3%)s$Iw zs%EuABK+jEb+W+t<0immp?Wi06x;5|Yj*LRW(7bh6=`6ks{Y>sei){@=u;V`F+7vw zKBo-8uu(AHKzt_^+d?FFJMA`K+HZkotlm3e2&I;!Yt%Ih(TYnGqQWQoVSWU82VqkoP8n5}qqUCW!=T=Nx?>`Rvj(C2 z&PssX%(CebZTXw^@W9JVaOGpUJx-GyXK@za9??Xnl}eMSQW{%VNoy19$~=4^;l9mN zDM|BWHmXYshn@-@Bb!e*yVgAB{?GP(U$@kTR-&QGUA2(pX%VI+e-|`P#P!LkS}@1m zF??s9n$j^EoM?lzG2Ye$9{&J5C@v9NAP5ROb@h>oOtfn})CveF5RJjyj~%`k_I*X5 z&VYst%vprYmN63>1ElTix%RbT_6GuB+oxhjUh)0$?8@cx;JSxOda3DMOmX{Wcx(SV(9Qy|%gk0Gnez%(*j3S!U*_?G_3kZ`^+v_DfVu z*m5Oq13^2;(hPgi-URiGdt=(VbH8RvhgA(afcy=iPLqActMv26MAFY?e$v{D3WXT- z)S6AN)<*t4a9pdZvaC+Pcy@>$G1TI-I1x>vDY7WMse2iXxqDB;4NHTSn%bDF2GlMz z0L)@zn#MYLZ;H#vw5dNQ4PHUH;;NYc0JVhQyc5=%lmRrr(ijoqI`#9mKGHRmnezVR zdEgappxlq_e0$=hN+EPO@LfBLTXG`z{V?AM*k{J2y zLBXk)qeCnk{{W(fpzK{_$73dMJI{%sNtZ2CH$!zUKsOCfNF>|w31F~5n6%psn5fRB zMN}L*WGefFvMot_AH>rWfVs@ZYPZ|p{28Fih_+2!rkD~;!5Yr|*TZBuphZ~NA#14U zqr3rOeS~6$Rr2*`h&|Q^LGN#cr+iecQC0&~0Ppku2KcE__-#*!UtDU`0Wv!nk>|em z#RYa{0Fii+{pSynMyRzKPKAgu)bFz0fDMJz#oPe%j1l#WNj@v7D|A<1lXCj=ch>~4*7;-P0WC0zBm3S z4NaUBt5;7Ei41lsKYRIO+4Ra9v_u*}7X&EO#Yqd^H3L0jY;AvSW%br<(q)uZWnomq zed7@U?stGD@iAaA{{Ts^8zMnGiC(uQ!7Pl5<)^18(Gh9%Kj*Rjs-3zPKcJ2F##)~k zJI%Kjqm#p+15CX0L0C0(oJuVAQ{SQNh*QSbSaO5B)3k?d1)P7XIi8x%v zV_$-<9`SvE0y;-bCPj&AfT#zk-uwK;yf7*7%C4gh`iYzK5kI~jY9uPR6No|{oTPi+ z1Ww2K$FghYYveMInWq(=iDzF2saPo;xfNISxy5w*vrlQ}{{Tr_!1J~;lmnh6PxAE@ zeeE7y`hM6n)iZBWmR_Tj)YSFdY0_>*#kU?K2%62-N!)n(e6gH;HAr;`u&8{3r~}tX z+Z5`}3MojH0b2h6)J(U_Ps0mVm)Qpp1Yk5k9`IEFAC&gTw0)o%7KJeiq?7#ot&e8a zN6OM*W%Rze{3a!L_|J|qloj<x{FiHnwvLWR91x#%`qw zC>hK<4pHH8`pzh~zQAhK8hou76)GIM@Q^EV?c@AoGsH}`miL{$_BVreA6~d_5J1$F 
ztu`AS_7gt4e0Im)<`IUqrnHC$RO)$9LWrIFo~I2%MKpanoGzI?CBe6lx2ZVFqTlYc zJY1OO>uBjewliEYdcZJ)?{)y&<==S5S!vNL!cc>Ut`^_s0DfSJ!-Q6MX1H`9cPvbf zyN?m;wlYgkE{#o063tY9y1-wsw@d_f=aUAgZzTgveDR#jeVZ>)*WD7t+I`a_^u|xx z4MvSxsZN_zshq0(N}l%*O*5fy`HOsUPd0{|GW&^5ThCI4Km47S{kY16%v1r~sAkfF z--AmqJ{R!E+f62QQEgm{{UDL^0~vavr`{|PsepqvSd;r>F`3PpwMv8(xxk-%1sVWf zmzL)lD%xGu>J4RTLeF6&&f7%a6u5{uh~%?~+u`Bg%i)StY321)dXUuvD3E>9+s@wb z1E+n2<10@uo~d2jhH7SXdX<^*Ab0O!iVOb$@---e5fGSLZvuMui{m)t)~F|yI)AtM z!(Hv)_s6i=S*p=*{X~#|(YpK$`e6}Po|<;H`(G6!K}gm>-)|nVw%B~VR<{jRV{@dm z&*6iQYGzX_m5M}hH6oGb#ZT4yn?@+stC&i4D%8|s0KefRewfTxG-!~9?dkf&x!t9^=V(b0$be&RC32%&q?&f8g$l*=|ZPw z@tBE^-x-F|RXW%K;(oj_q|>F#xJ_QVK|ecTs?x7P*^@&JLBZ z9(#Pe@tax{`G~k5T?sy2@YUX>QBn$W?;!Qw-S);$G?jJJJg%ld?g;O;{U&kk47FU{ z14^3()Z4biLH+(@c;ct9nvO|UL25#+O6~{>c$GESZwX|83G5>H%d#qzN_v-+Nr9<= zT~?Wno>7dl4k8s@DzbMQ=`-=iwAE?8sOEdk_S+uA;sVY11*4>XD4y5|IAmC}7xcNs zT(v~@f|_*8(z>-mq$=xEVbGZozmJwyu;GeQ0|pm90{aYSTaL7wYHF^r;#j~WciUmt zq+>6@%cRqyl+8py+I{AYy>%JCdtqs{Jo8i}%S@jVZ~S95OrzDG#-WFi1O9Qmz`gqQ z$FkYeHAEQ_#ZQx~58Pu=rmHu_YKcuXF^P~r?f&@8W|UGbI$$09!H?S+ytPcy(x!@< z%ZDs?W232`>X?b0t%S>(RK+w~s*jIT^26&4HB#a!wa4k>k7scUS42h>_eHdMK-roj^!Z~aWP=`<_JZoC1M=dE z{$sMVk08W+Vn#BVY{S+y#D&+lQX~80WuC(s%a9&`(=1R!EQs5o_(x9+IesJ5amu6Z57UsH zVN{>ZFgNdby>>Z@ROBFWchFK#+pv3Rtq}rKMAe##b(t zm5ZX422$N3{qdP8mt9JVy(}VQzkFXfb(adNuqGo^?Y*WB{s#?78F`GaQQpKyZ|{$0 z70UguhOCs!a*q+|;KcX9tHcW;1+VEHv6-xyb$X`bPKX=oVl@8%d?d9qANra}@V8Fb zM^s}>N&c>$BhRnG_|DO4=oK0a34{2%eDO|!rVy~y)_qhP#dc6Yk#8Rl?|^BrFsMw6xCFyNwyS;; zLHWet)IAEE$B2V4AzPG*)JqBNU{6k5dnwRo3b7-0j{q8NBHi&*N@rm@sS(Rwi;GzNn-U-tp(+6&rJ@J#HOdzs?3=;wY3Oem0!uV8D?uCy#Bmivy zoq+C5<9Hpcv6iKz8x61Z>yK#FMxa_{fe^co6CYc6V<|0{meZuvI#$!}0tp@<$nSD7 zno~}im5;p^>I8NF0E~M*mw{Pc4Mq2LLrm}QPS?MQ##*szHG^+TrM#L*k1NM)aD{qo z)j)xzfFKY`Hl@@K;y~ZOTvDt^RtiR+i59&p#K2-OnBQBDnV=9PnZHnp!(~InabLpto_WYKRYV);+ z>*tSR(+4|Nv#I;KFSz_orAMD%-xVu=iEd5R!0OUyc-BiFU@ z?9{1Ag-G{(=-dM<+xh8%Qkt5a($!+|?#9vMIO68AuTrRB^k5Z8Ajs+CH$J#IW~|?B 
zX)mkX{Hzbw{V@ra%knE4&I3K=brF9kwkcF0sZ=&H0s8gB8AUAK^w{*pdO^~lrkql= z+7y0rkD&hmR+cKvy|>0)J*Sa>@gba5X?6ER!4jhSY%Td?0fSJckV1)NEpjFa6X!d6 z#wf5?ivUC$`eKuWYKRZ{r6a}~P8y{;D8p8wLXr<|z6bY+>xx8tC$e<@5hUo4dgH9)Atz4-|6Jm1@*N+G*Ye@{$%K1p-md{WFUGDS-?avjF0k1qcJ$87z2uG}hS013nc zt%vGk4JR)}O{~T{$@&<{Q9-DM3R*93(!}54>xV1$nRpb`Gt?x&3YR3GPTg@zstm%4 zYKL50Ji=5HHUdc(j~^T7(MDTcI!J0Ws0EZXOrE0i*A<0EqP3~iU3B7`&=i0W#Dc-K z9rYV%+)cVRSWw9tv8=08WhCk4X9`q~Zjx+!4;*FE162IlVm7$dzmtDB;fjady7+^A z{5SLKjNi3sD5A@VD8<0ID|%FGftMoe#`(rI#MJOPpO zI^!|y^v)ovYBsUW8iZOzZ{fdPqpmVknz9riXrjyko03ia7uS4N$_h=yZ+1X|iYZG| zx8)ypZa3v8!ft+EI2CfQnc*2{;R3&wOT>B${@Bc*6i84P?5RFjS!5{M$8VQRR76`y_e76Bp^s{@4H~RkNWta>_SndXn%3#Bzw(4r6ZkLQ5cI-#{k&5I!+I@m7m9T69yJ zN+|M?sFU;fVRLzPswnE=#-&PzNNS3}0}-*(&;UD$8^&uZQ`!|CjV9qzuFFoPPMj_x zyf0J+P1K#Wu^X=3E!@eeQ{Q%yqz|P*!(1XhAC2QO<1%iC}9I>x9(?r zdq-77JQg7xRZZ<7r3m%(Gkj!CYFSjLT+7S?K^pP0$ zvEt`zH6zQYk=D!<`0Y6V0H|hYeB_)JYz;-xV^o`058O6-Cuyw>786ckt=H zG8s`%Go-^>OZASx#E6*RwjF6zJRr9>-1PPO?S#ltqfyGJ8k$>wU(>c5IJsEi^Gt$3 zwW82QC{}9Kc$`8c3wDjZjx#wrS&YkZ>bfnAgc@Y@)oXo7-x>U?Q5jQ0N}zgrK{3r> z-aDJ0+Z3|TNlF!-BtEU6fjvAvrx`S+8l^+;`S_Udz-8t=K3vE7jAkpi*Ox%h!H}zrb zMA)qgzUXLXrT~7Sla@&-TZ# z3ZrKY$zezWHr?!edtu6ormLd3hFt}MnTti_+(C?S>2M1gIssl8({M|R5`U(~44X}- z8CnA@^#x(Xo-YpCqN^#;#BHQb@nTfqy3ozjs}dHfY5;6Ctp2AJ=uj0BriN!zsa_0R zPf6TZ9nKp!Lr+Nyw2i=@kIxxa<5C%xQh_xr84{~~SC8KUvQ%A4Mvu)lwlg)VsxY7~ z4!vN00B>>9Vlb=zRYGB^B9`d}1miN?I!?;Ko?g32>UvuS8PqLtCVu6kla@z}wys|K zff%J&QFhiNhBUzf>|9um;7RhFR?R+YqUC9x5RNTYdwswX!F){e{023r1yNkMr=%-C zy#h|={k}1Qnbw3NlIj{jxiJIx!qUv;-Ris=S`2~OY$Lw-jdVDwVw(!y&;WMS z4y*T381{BthH8bQQIiw2&u#HTEZK~=5{8~Y$xsq?7Um$pQ}|$SxZ57eroAM0H&!HQmZ8DJWpF z##<=9%%=4NHCJmLY6ZYLU`c@{PhIf&maO0so07`5lYN8_+kYHYaLG!IIXtDR4$Lih zC#D}wK@tgJ?_Y2_P45Gs-_I4*g?6{&8}^a>VCFv2MGSRRHvW(x`V3d$rXGqq;5)Fh({<|4C ztL;y}AxGS!TEG~vj}txnVU%Ywz^T$5uVN!fxNq7gANX6EqO!CVs0{P_Mz^LP) zB6r?mI%6r9`giV|+TjI*Bk2l`uQO@WXxOGJE{6)bk@emBW!xvc!$V zfjzs7f7LmK%hRQochym@@%fA$zukba5On#=*Co|_7WrSAofp3$(dp8c?zDQ9>bL^bIZXHZOl 
ze-Zwb>m6gQv8|#e+%Rqane)8=02rw1Yd6UF<1bK#3RVLG?0e{vv2CsAz47fIYf@TN zQHj@Hu%du$7>>RZ1J|wv8oJa4I`t8%`|Uncgv)RkE~z7x&H1&JpD{NbY%S9jX_(~i z{{R>{VaG?T;7me+EjQ9`th#4#cM>M}T&PM*r`;u5(RKm}`OZC~P3ppf1!@Lsd&y-W z1KeVrAhh5Uqonl%-2VWyM}UtkW?2<9Q#$}ps;hpj(+QeHomL!<_U{p}m>#?Dhd%ezV=5&Yt)`jf zJ{?D){c)Y3ye6wokTqG=-MY8EB7CE7;ysr2U0Bp7N;weUlmcQqbewCXUaL|Q#B_H$ zbx9PGh$J5c9}F#b2S%-~f8!a~gG!(hUNtXh{&&R_S7zk`7C$!knClqRO07B4y3vqq zCg;9C>TJbK$F%Ek!3c~1ALyq|PtO@{CkSc~Iv)NQ{{W`k0hm|i4N6k}^DRUl5Fh8| zha*iu4>=YcW}WyK_44n&`LDK9-5!}pp)|bG3{Sx{i{3njHNos zR%vq%Sv@EE2?xx3-EmHur6vKjvMMVR_*hdwN5@onVy;&w#8MWaoAOBj+GnvQPVpZk zrW(3aRVblKsXpO?nD>~8=siv9=2D$aMoC>dN9N(cB=o+M@WnlKpYoyU5x>sU`eW>>!%dGb zz1)k(af(d}C^sotj~MBHA=d%^(@_BTm5BM1)5{1Abz+|FIa3{4C5NWg#&)CAsI?U( zYFaz#g+B-wOs5*372845%ooILzPQTdN{%z~7Fj|0xoy10)6*F)B|a;>p?bWl`I7wJ zjp>FC=EtDz)C^ToC;2#fnZ*ggV6>L0;_BF|kO%P-g@Z?KpT;wO(4&#BOjlOXQm6w! z-UP8bSS|O)s##F)P6}1$)%k;f<9`Mnv5^g3xSDx25>5TkY(9AQhbg4~H}L~;@Wnj{ zNSK&`;BSJ0n5j`jLYF?6&(o%J6%Ls^N|Jx8Q9rIJjv`Ik}V!Kn}Xb6;& znJb|w(+p%x#Y&FO&)!FDeW2zjkm{h)f5LfTgLLuI-z;Uy*Hxwe0MQRn6W3E-2dVVN zs`WJ@7=FFbBbJ{qgMjh4S^8jE1Yf<{EDvR-9-J z?uft9!O>5i+^Thd?noj8U$?^)X0roOXiYOpZQcjYXTOdPR8lO}8;D3{1awj)eE$GU zNEKSw-f>E+(jucP1$Nwz0TJ=VPFgyrOE3j}Nc~1CRp6}~jv(BAE5iUrW^Ek@pD%7CAY3Hcb z%y?6=+s;h(z^AU6T406LN&cnj^ZhV$TsK+~%xS4};z|8u8Q6OCQeZ0TzeXY$-@5#9 zQ7Sz&rAtK4ooC+D&~}TR&s0@}9WN zp-jy%C!Kk=AQ`y%Pfy<&s#Qi+(wDzcS!KU1j^3U)85)AC8nj5%Sb|3K&2jVFwm9MtD0 zVgQu*OaB1IGF3p#pa#S^K3Ylru|_!N2)(1ZpYM#xfWR{|m*LY<7iS?z9vwgD9?CUY zYSVxfD(O@r@XD;reMr;cisMPrd2Da%iid+t)hE7*zM?Pin_`}Rky=$rI?+Ki*-nR= zjyJRbzdpEZ-8|JkB{kANFGV12`kW?>bsfwtFs^6u4kE6R;%Cbl+O;ZH4jkA9L_Xq( zShrob>xx;0&1Gq~;t;RkKp(ibo*yfk9^B#p&z7u+B%VYjhYoG|7)bDm#Sd#2Y~2^6 zkHV8o^(r5S4e$r9_^&`_XsewoO+jZ{@)pyoCTvUrxsls!y;DmtGGH`tk*S0EG?_mF zv5Fa!#rr*=PNnevXX@CfsK$}%u|2f!e*XYoxxq=%5NT#wr(?{z2)OB_?~iITeTJP# zZ6>nni|+zYtp1qGXhfjoS>&wuJd^uj>#aY#Jg0uvz@&{?olKS&^i4zbwlj1Zs9iT{ zTaJr2p1<*ef{;KB`3&|VK32wQDN>aZ^=dd}JpciVZ{lYeJxxuSWsaSPLk7nR{{Uzy 
zTVklTZn}Xp=i)kaz+y+gAx7AG)Xrf>n*o10>9*tc#&bJd`)P9dOtMs_QA#ORl;R;* zZ9&4w;uAVVXE!6Sb+2TTgstGQ14n z4)z-KnAEML2?8U(>Env&H>Cs1apUwcoTpZy%Y~_>>9N$t{n$KM^fS_Z1~R2GO3hNb zl18mUM^zltJC@&8!0omw`$H+2uDKtyMq;%$sY~*eKpF1@4Is&r`;2EPQ_TugXw<0` z1BQ7TUtCNLBV+FEX|c8{(M6+1j~rB}l*o7iXPB@MMC$VGjIMCds?-jh40_Dlw={L_ zh00P~p>y|-SvACsAr@Fz37w6}yc-K*tqE1C59ClkruQB_{36!E=*odXJ;z`}Hs!Rd zK`^$0dJFg5)x9JN(s{P@Kfj(6BUYVk&0R*CIW)P5)Ok%%M%=tjB$7SZ6RP6B5M}C7 za2E8}RXG~;sx+xHV=!jY3>+;i%}R+TXi!%G5-LFJp$QnI-aZ@O?a@Fh}!sl942JDL5AyfzfXik#pesgl*^YoG$iVS zW*-jSKfQCOIY%u`Sc*jgo!9>WP4Bl?J4a7inP+OO)d-u%fa$;2h5_KawYY0Zi3i@+ zgOUh~es1H_8GOR9PG#2nuWr5j_?#^?<|LELtD#@ge6NSeajWqhx}xjJpV&Yqrlpp; z9S5%1%c|22M3W6miW~B=0GsY42>4DQ(xN2VUM4nZQ6Jm4fW<0Wl%0%k*W-t0HL5%V zpuCp25Lk8a+V+ekFSZG-NnJG%h)ip^)3%*TBIHDy_+Wb{EkiN(v!xc58nh`cfjaXd zl`_Gi)~3ofv)^+x$WUpxg;GGY83gqB&MR`bs79lxUvnG&n9byv)fuWJifqSw08cR< zn`7C{TQH46>-Tymu=DYO>5AM+55v^f<=?cA-26@%n=?siQvs?9gNn!SX;3w({{VKG z9lJ&-)k;pQQ&f#s-g$@<{@)44a*(M_8FwyZw{0*0`26scDrivG(hEWShNBL>r&9Re z6jXyGn}&<^RqOTJ2CYDJo1u+B+z#>E$HRPAN=%}=Xs5$drHY0GfjVL~eZS)LQk_&= z1x6!i00xiH&NDQt9%T{kEx#y*1Ih=(6sXgrqO)^V{Kn)PSRW|(2*p>{^Y!&O;BhVS z^2TbNY8kp|N`aQD0eca`w=%}Ze>mR?Ic@n=$AEx-JL5T6aN%cG#C9Y{{NkSzjGC;6 zr%t^wU4vE(z&u=ovH0UCitK8E0i?%K+P4=CK=D49sqY`x*BJ^eoNDss5}MPP+IkN5UYPcU zT8QgK_D~f7{6GOMzU>=t;g7dhP8~5SIYUf?9$q5bZSrrk)LMoc?==O}V5Fdu!>q~u zFu7`{OHJ%4TNb9u$f(c;z*TH`_kX+tvIxubrVqnkO`YKQLMS#2Bd zb0=$UhRsqnsL~Y|Q%DP{7{2fUgVMxWZN4&@J!G|U#9dLt-afyaQp!-Ip_O=j?Y=#$ zr5KembvN^mW|j^~uxfYT-eWP|;t!F&4pNa(P^j16#~HLJ%5SHAtv-HzF-6Tba@5%B zUD$|a9MHYJ=HJ5^imOQ%N#>};f!rHFBF6T@BZWgzZg&zoos4a7m!2zA;?t`_ngV!7 zLRd*rscvi!uGq@W46;+r%_@P|=(|WJcR<7EhtJcdQ5)SQOu^WAd0}(8esRQKyH#ab z-hJ2|Pk7q%wkrUwsvsCW_k+{V6EmdVI~-FQ3$<4}fDf;`X|S1}pw$AYG~7D_r2Hq# zY*bfuZ|gEokn`{7{H9f(NiXi>Jf@XWNzpfm?ia_`85d!ZT8G}|Rp}#K2JoBwH^yT( z_GnN*)TNkjt6bQh0q~jM3z$}>&r~p`ie5*$PMKBJ(CPpi9q~cF*Ij~*B}Sf#13ZRBo0kJ}lEty!8hu+t-OVMY3UKa6FZ z2Wn0Y)am~KP`=UHN6!sGsuXEFzlX!le{5&+>Gp79Oc33%OmHiLr%aaw?e79H?F!m+ 
zbZgQlT{_9b(xsR`5p;nBOrH*?13OnMQimf=Rvb-Spcaistn(6M(rty#3b}Ine+ldG z=r_hcDHegH7JbT6s#pix{hZ2 zv88@-@iBZQRH~*-8TWUA)3o%$1zBV6fKSSCn5jw78h5xP`Co5H^v1PwsdRzaL|%W3 ze;7J6DFP#Zh5jAzuumx8(;13X-%+W}1j+CcEreB6rl9g7t8YA-bsfENnXAXB>~1wM z^0|TVj8)EP$W+T6-KmA`q?Xk_JgXSXK4UGSx*=mt)c*jfVfA`xQpEUq;*YeGGfY*a z)X8sp4?1)pfzoHw!yJ08%lLHyZF2_S@rnS~8o-Gp7TyT4?cc(FIH0t~cS~J@RD&=l znos?r+jR27W;C%yQegdg;;v<@W$G^s>QStJr&Z1TFzVG?l?{)6v)|7dFJwFC>Q`Wk zm!qPsBIFar6)EWeAA6~fA=u)!X0>eSq$+D=)lv@2t%&~r04!1jWoQq@!%bZz#qIU{ zRHK)s{6d3Ur+*0AKBsP&6+);5M`0tyZ}Z=$hA3$|n@{7lfR#c-#rGq<`r|V}cr4)D zw$tG|Vuo*nON2zh)Uo+`k3tW}6!UFbg)-EbX?YWKq%$b<<^KRr!)((kih6c+4a?m0 z@FQ4Oq}2VebSh57ukW23ER^JnuG8`P-+_jMwOX^r=;5U z!YxWZ(4)rpoRK492w}_9uS{o}ojFQHb&)J}3r+3X#~Ei{GA&kZCn`j#{6XXbeNDQI zdqTerhF>n6AUWK|lGCs~PM;D;IHcOtA(%YC-^$)y@s{>>Q!4M6BBe@|3KHJ;Z~&Px zGhx0CW}Psrco78tQ(-f=Y-VAAGByxN@b82~dE#uG%lW5&I5BRRqQUANK>MT)!uB2j zbm_m2Gp@?oMC?KBu^VmlJ7R*hJjzOzS%y*xFh@b&@;t%ojK>C>)213c0o-m5kZC)?|)OUzre}9%Cv#j)~ zoq7b=>W~he=Y9NsX0nXG4w{A`LIh|G>7ER2N@sCk|hBDcuP}M4F0F_k)#n|5& z#nqRrs-Z^=$Oh27-yXvV%z&@{oX`8n6YBtV+XMrDU#sKT<5p>md$f}ARd}cY@P`<9CY%_hDroB3*ilPE~vIYmxwlg%@ z-RidNbz1g#ZkS3dR+1);8C#YE@c4JXOJOq10MK=+usq#h?d6QnkWk9JPM*ERy#5#! z^wCjJYHc?10FQ~rR<$~`8$xJpKr)joEYb0Zi}&k=YLwoX3(HZUvDtLr`JWGtGi(cL z7wRLe_V{3Ca~ZE|(G+QYmo6%bbrLme3$I;9Bi+{002>ar#%Ch0Q7bhgMw3hEXY@V?3rLZhxmw(7>vY)K~uiPe}N#q4LMI{{Zsf>D8p4(0=0{&Q?`g&{vGB ze;&S*d{q>gPOgBRN2UbRP*H?4PcLZ0xK%@WCzjnud)oxFX90^dT7S}}{{S3KL6@lH zg^{EzUPZM|{yvzhYt*K*spC?t_L-jxoBVN>%RNDQfeeDXFzPMd_zS5Do}viGZz_oB zqRV#c69jadm<#aqbSof&J5D{3rkz@*Zc#w^&NFLO=(K4hGLQ+2O~=6FG|fvdMwE?5 z-i!1WC)eaK7fIAdjx-X9SG<4nS;No>D=8$kLae@o$o~N4fb^+lS?b$CBl@i-!yjm? 
znyw4eDNY8MlV#GV+iuDxEGMorRgr$}8~*@I86qUK`-7!X& z4y!jxq?bM*Z{vOZ&#htZ>G8aGpZ@@~eEg~DuP;w}b##Rw6|fRX{{T?iNf&~9(ljW& zN~{5@J?H~}+jF)n=F+7eTM`D?$qd82G7M2dNhUeijUi^+Zx`H{>xy;Rb5$uZ5X~ac zWJ-(m{_ruK0MTtjrrjp^%V&anJTC#6lvGrWw$!)y`iRCuG}*7TW$GC$iw1fIz7mR} z!0F3#;f_{nz|Y+&U_4Af{V_ovtF=o70rZZcw5R_7)_>;>RJ{ish}*?u_5JZ%jY@*( zKKMU50DLj*(NYf=@Ec!o)Z69L8G7q1Dj(Au{ptSz>M#@8dU(LcwQ7R7VwYxp4m|ex zK*nDvGSOPA89GQ!y)ArJ%(-1pASv6vudkjeD%4&=OzCO8iGv+>#ZPraK^C6-4URtb zQA#lcM1F^%>1+_x>J7C5WyZsCBeYLJjPn$!me!0^SH6$LtUAY=hdqXHjmeZ?Dbt9*N`5a}cejNtd)vvIHV1HxW?TXo28MR(2 zh^J9%A-K}Y01Pydaq}3<(y7e4xloxK$&)@iK>9eK0QVMT!(5yCliw9UR3Dpuww|9| z@j#_#Y78e#Jlt+Av~ET#P^XnaPK7=aYI=m~c7~{R)H3Ym=J7DUgaK@2XmO2u!fNGj z&CCq4nI>9HmNy&k>R0o~?l^rZ3nZ6FZ!H zIXE>l74>syWmsiunN2Qz8Yv_OdbIHgO9 zUG>7H`$@Rz7|R8cvF{N#KAmx#q(iJ)oXl^3J^GtWUv04bZZ#IB(3-~)VSULYjlUOt z#8`{XdPr%2Q>fEn*1&D3-)Y053SbNFRQdRNVx@7H9P~w07a;VD%G-P=v?ynEB{J0N z0eWq*@t^qkT-T$nfbg4-^!4~|ag`d7p%$>83<_N27LdB5_WJO_01S}#Sam(>MCmr_$YT&=2Pem`b_C%e^vbl++u$>771v z^TuEb3{`B7!I)Y}0@v~<_{9mTuBvai>VCJyN8nWd09RAC_db|8@Y%sZBZd`0kN%`f z8SVIR%_ta&QMvyBP5%IC^2f5O&iSQ)=qjwoCM) ze-FO8A}tEH?T>8dC?-JVTz%tIqVwt-Q1B;IW7w-&r?J(lPLlNvA=C|LT>k*5?bh%C=QqXIN_7L2IG`rW?*?F`B9#M`IJv<0y5rQFK5Mlc|g` zyomT8(-kv_sMP5xEDhjTjlWzyDz!3W^A^-R7EDjivS93@oDR;($v`F2(A0~MXm zo-?)9jVBs)RBJ(t0djZr+Z;AZw6g|Rnw>~asW%-r>3GMqD`p^lm=+BZqu$V}OviXE zTlg3Db!BR1OfUjL-uPlm9Xpd4(m^4&m>#`6@kD6?M_guV4F;C3AH4m6h(2>;0xybn znJE{jBmrwnnYcZmdHGHuH1rmqi%%&j;3!832cqI%5vIy>rn1lG$l$yc;)9G zhjG3#sZ}UwR8c{LLM-Z8oxFD4zH=*0IhLe`)Qw1fD00I7Aoej4<%Q6+DoaTWb!i~7 zy@r#2!);%^@T#*dC;o9xh)ujloC_^VW{Izn$SPeBL!-q`=d06-7`0|5a5 z0s;X90s#X9009*O0|WsD5)%;=1^^WzBOxR)7BVvjB|$<_G(%EhVsU{cHAGVvRAX|H zlELut|Jncu0RaF8KLY%Ws;~QVi+>zGo0j*?eMsqD-3R(|UG}S6X6Wn*A?T?uQrGVFfWe(1jJ#9d&%reM3lFV>vmuu%v%^* zC*;B`dt&DwF_@VU?Uv`a?1oMOKwvx^P?;#nZu-M3MKluFUg7fnZAQ3Qo`p*;XEQtt zr`@63s9!sdvjx%&MLQbU>LhF{_7}d93~(QJ@(Q2!yLa}H(CyC+A7Xw#E!6#mL~hlg zmSpvBJ!R1FA78Kf@OknRoA^lcus`1XY8STz0WST`s88KV*~=C7SMl;M#VZ36y=xi6<8;)JaDGQJXigdU 
zw$iYW*0}ds?apV2p>r3O)1g&?$4DTjV&2cZ>aj_vpOP@~{GdfABJ`6za595E4M~UaV zmp3|c`v`0L2xB#Bzx>LdN3M|uui;ghhM}qZAZ9^KvYWc-V$QB-N1No$`lm~jrf6sR znwiRGPf{F2#WYL#H7uR(O)LKZBGAIj-2VWy_vhHX&yV|*VoN{%`#y90d~fQhfBFrd z+w<}9o?u=$8}t2nr-it`FZlj{KmG6d9r(|kFaF@?&(DvR{{YYU{{ZLz0J)z(AEw+$ zqwBGA$@u=?1+prZ1BlsHIfz)WKiJj?sFm%V3)1-WHl_mzv$a1)Ltj8#-JSh>Su*Ub z+FoM{1`wY&6rf`HvxkU8=L%v>L0|2o)T^7_lQ@ex@3tnihQ2!x|SD&XDG>2%ixz45T6Z!x@8{zFuo{{TK0tc*C#UoWoWV9np%x3060D0$C&4} z_I}yYsqq?q&8I_@;_*d8L%9fiTXpXMhB(l_l$rT~qE?{^kYR~PSSp+?Usg>LMKs?>J$Iz&SqVqcuH8kb z!_iCDS?3)-8M=8|$DL2CnC9(%emVTNe)R$)xEcQdA3wx?GY2!{cYHJF_2OU1es?r? z`Tqd;x99wBZhw#e02ch`{{SDZ-xX8SN2{Ff-{FhoX_rgd^igVyJ;maaQ4ujm{>?v^tYG`n zbq3V(DEl6s&5CYQL}i#?3sxu8UT<-4LYIzE(o=18oi|FY;P$yx!94t%dE~-qo1r?b z))fB$S7cTjlO1DWD#7qz0PWL{>pCBfeT7h}%I?SILd>6UeP>p?oh+s06}sD47FX0< z;I#ORGPUBv#PxVx*3Nv-#hLd`uvWv==y_>L&PEdI@|6}1i>8%K=N5owE9A>uG#Aj~ zpt4nr``7;D`2i?jv=-@6lE#mDP}9{L6d=O$%+eZd4ik8Yqo9_dSQRgo>K~nM%T+?P zWIkhAATffu(ixfX3jY8dZ_4AXbxSS#&gL&XwopvT$@19*{4>U;`?@nQ6^mkeI(|Rd z`hVvGF45Up!VXB(bIRq_WY7i5NkRk&V(e@ zLd6Bvb-mU8*uSW*d@H1s+!OFyn1f)Y{{X1}01f;iE$YUp=jr}J?P59ZWbH?sxU%Uo zP=zXS14nEXyP)%^Q(B#P1bX`}U`qZZ= zt9lr1t3=pr2NM#?Xgb}PB6BgVO@)L%U+Yxq(UH&9hE3P`JlQqIkaKkW+d$BkG(VrU zjA#Or=G4x=W=w_kVK{j>lvvCDv&v`3L%kzNQ|%aoJU!qBPsHOTpb5ahJQt!zxO44e zxs0kZ^gD>dViac5IGOi8-1Un~2kQELL^#Id7y9Ab%kBL%P7I^=@=$7>V+knN=eBlb zEF|MfVCu4z$febh;FD_`q3nK1xC$I<;Z8P;b*t8;Bd#nzdID32ND<80&Yh(!dH(=Y z7JWEp<}bK~2J?BeLu}T4h4?F3FGTY6x9Os_JIIWjEcGJ{ocevAs(IZ{$I-TmnM}I&PKYtn3ng+JV=%@l zwXoQP()A9=YJ0YMY4@{<$DZj~&XhJK8gI#}p^HGa@xYz>9C?nZ2G{S@9JDbC6Uq>p zdHmWrj;ploxlq;r05SvQ=vOswLHK-@sM$o~u~z&Zsi(fbAjWdLoQ-s{^qpkA_9TX% zw(N0|5z=qLnV--0nv2Q7EbDRl(}Ad}lr3LomW?z#_e8u0SLr_|D@=TRjG4Y$gXAgz z@~7-%d^Dzbe!~|gEoO!|AK)*m)NDWe{;}~;d>IBHSh4Wm7&>Oz@TAOLEPl1+l+YS= z;&B8b+gSC`5>7wk%hn#%{Ck(u{GR$+F13$fY|p^adp+BX+-^8%aLNbxkZ;kGrGGXU9pqO@CweH~ZD<+0mo))=~Y7 zeS58^eG=)Ue+8PeO|R;WOO7><*TpZVROcVqsilBpXyMeLqz zZnIQ`o@e$4z!?v(R9`FHCpYB0uvpaSw7!iG3tKe`f7(qZ)7kNkH((YwLs`B;m1z4X 
z+`-RCG`!EMV~k=+IZD^!pC2Ixr{a%;7kDs)40n9X)#N~e)A4sYE~hFt489h`x)(A1 zYghe_tP6Gir;~%{T1-Z-hc-(qR9ji%hfD+V zZjVhLlk<#n7^Y0|#Uv)l&T+4zsijcAj4jQ4o5 zs&>y_VIB%iW%{R;X3Z9>J>H^u2ac1NrU>K9FO_qo1`)k%QKvl{GXegz53t+?r-fDE|}(*-zblO-%SWz1sD07CwJDzxrH~;=&{zz z)vK10s&k5zW){zxi7LtRu9?YA){YACRQxHJST&4&t14691rohrEtXDq*Fs5-nlvc# zVMBCHA?v}aiSra*PHC)vJ?4d>)Muu=OR^(JMvLl_iqe3C-s;JX>{U@Mp!R)?XSp{PsrP zg4~5-$LlG-aSSQgBG~FdTQ9k8SCmq=naQZ~AI7^q0uYn&XOXEO6%Abuf8>?(8FhDb z0*@wAcW4;~&M91(THCZxOK5)Y+v9O#hR$o@%AX()JiUwkz@9a%S?Rx-`1e1tOq~_% zY|nBBT~m4d@6`Pe1jWr?Xd95;IJ*-}WNg2|2{1m=gkH}BH zzGVF$D74k|X-=p1s>yW>s;PTpCYF5^`!p;Xe49I7(fX9~{{SMOQ*S2M&&^1!4nyNL z%9@oO5V4eKzJxjTYC+BTYA!Q{PY#XHDPT~({+c9kO(F?dR%1>BV*Z?=M7w<)PuN(t~y z)2fCWXHU1o!2{0aoqv*bO>XHUknHD06f+;eRYrWWWcC_PXMiF#uylMYtHeMBq4_5b zu$y~4M^7hEB|BNjlh@v-rFs-~tmL+MI@1CIfNI$*<=40DlJ~Pe_ukCq(A%f&^qnrD(0{f+0Z#*mXi$=TG6 z3jspQ_~=?Y{{Vl~{)FqKeZ?s*OXH*Q<)t}Z^Kv?v?u6o!85=*HqvMg$Y+sZ0j!&wk z4Ifph3Fq3=qbEaV&uw=uT6zduxvX!AH)dcyMkTjT`C|E9l&;*Ex#eXFCsRbFF30yq zZ?vLEzn)jGz{acwVK@9xjUyggR4MCy`ybS1mKWKl?3DhJJo*0s5-lEu{{WI7fc_uk z&vjp!apI=eT+VkhyTgrsU%RO+2d35d5VS6nn>DgEqyGSsFp)*zlsnz9)k9jB8tjqp z@$z)+59-zW^?zRj%u?9JQ-rNaEmNa2-)<_D}=RxczuZ zP)r2TA98o(?YjrxSnd%5l1fPUo*P~5_4-Qhex(x-W;a`ooir~Nn_M5aWbh?vUm&Vw z3Z-P0w`scMbNT?YFB}i2BOZ927dkNZ9y@vg+81ro{ydqu_Qb_ZeExSi;j+CS4@%tm z8qb6|=%dyPV9?SRmX}BHo^L<*+B3b=;{HKJ7box2KIPES)5l#}N@LVS?3fOXcm3*F zc>e%S^FI#!SE1Vdx&)GcC1G6k#nb5F2)0S4^Dq6fGEF0lGxT&ky`?tdme%|!0JZ>s z_9cIxkAcnk_|E~s`Tqc&{{Wwl_}%%>)mp*zs6gALg`uk@kC2j^;Kl~#=lqT|zt4_| zVQ=f7Jol|t`jE!aF)Q67r~`83NA;KYLIhOjRbN=1+fiayC4r~*tIW&E_fbeXI1#vhVJngfcKnmrgEe({$@ z6_ztTTDFgkCci%r)7<|6v_~?VKPB$~GoZ@ocM?<+1EAQ1KrxE&+}xa8Zya-sRG&%# zjn8OdjQtulT&U|Khm{~`$vJPz@xDLdl~L;cp?zl80PB5PprL`hsJuy}<}5x(598f& z=7Jb|y1Zx3_z(PlfceFb!47evUJ+Q|uuQMOltCJI0dlM7FpV#BN&f%@OvU;$*XzbR z59j{?tG_$`Kh9=+d})82{{WBvfBKpJe^amH;1H#T56)-HFh}Qif6kWSX=j)J06!l; z@H_EGIv`dIwu>nZyF2r~Gfv7qV3a{&jlb+h{i=f%=-yHa;b8M28O zu~$2)O4q3%Iqa>x<~uRdr(aXK$y3b3F`ok0(L43(rZ1GbwWHA43=QZgdl8s>zT%C0 
ztY3>>rsv5^W-zjaK(KW1a|&@zYl{Y0L3*6B+W z^zwS7apA{7*sw#U*+JNwHkB5I0ZzGkj3DtX_3r*@A@aJ+{8KCD-N{tNrS$&aG(Ua# zE`gztaTob5!~n9_QYj75Pc-oee(@9A(Aiucp7>V(a#|p&o9DoYybQ zok>m>&~d18fBnC~>l59W_Sxn9YpvU>tUWLGAvsY($ik%h;m%w6s=8(UQCGC8u!t0* zqNUo&xBmc6c@)Gf8M?Fc zxqY66(X00x`x493x!x%SElhO_(gOh;4MM$Mtq@i(*VIO z0}B44-ByD71GVc3swc7)AEB#J3I2_8{{Z>>tr+vw5mdRdDA_|gB`_U&SkBJ z<S#}76Hf#l&)TTwn;#j2pYx{UaH@f|<|0(Fvr4mecuoClT9f;Is1zge zD;UjL{{TzBdx~iOpGH-VkCh)rR!L9J{a4}4Em>0Kp7XT4d`bw%M9bh5Y*NMl01m)s z`1kvTWST@ zp?T4x)3{N6S~JIB{8z09Q}?-lB9o05DOEttvP>H|%VCWQ5$DpYSW(X-bP|x{ZH3VK zYd|ZX_?!8G$A_nPH}X!;1}<_D1b<8B6qLg<*tK)^h>o06p6*c4N&0>uvDQoY*CN1Y zZ>HTugvAH1l-V=wtE+FHlE^0Xw+XPSonvi?E0J!(pXvHt*D8Zvaa*c=YFs29>#ysM79)_XlHa;90StKWqEw4u6K zEF+b~=Xq1h=-($dp({XZhL))nCEFs{8>r3xc~ssMp~&_k%wU}U0Ny|$zg}3e{KKI{ zRKA2ex7X%%Iy5GJcO+kL?zvGEnY0^m2uO!L$)Lq@&B^!d-po-~`&|=u>n>?>Yz|u< zoFHE!VmJ3;b3dixvr?T;?T&J=#S>ZJnks)=z3OY^l7i@$P>i5|m$#@xOJiq0&@!TFOz*QS5sbB@+yCw3Bdirsi`r z1Noj$NHeSX8?hgh&CC{ZCIeezvz-Nq>vR6*Zkdc`j$b+W(t+-WwynvlC|OdVMG6J* zIXxJQ7ArepViKzyyy%W$ok}+OktM)PeT-4Oetge3-6l2}czjM(I<)371t|6aN9(gz zjF{`TZhx*=WMMbK!pbzYf$Qn_zcYboZ{+^~WKk0kue(2!)sUY1r zS5m5%ocgylLAPClWQIowKdL87woX=$RAHHDD2J_X<{Ce)9$Bk~O>d6T) zzSVx_O%r*i@&^^NoPpJ1k@8-q=RtF!!qqz9?S4IPmr$#s#kA(>?g>~m9t!IFY$<9_EO zKON^oBiM@1jN2mjb%k3x$;_f*{_LF_<|ZBrYRFfVIYIhoD-YGsMW(=tS5k6KOndCs zt>WkSePRbM9@Za#pq}Qf7zSUr+TF6nlM!!PL9F?_vz{)6ds3>=4-|(oCDbHQXTXCn zO-^1R$eAwp?hN1C>;9Fj18}JHn*@D-EcGC_bXoq#!ocV+7IeEbDn5FmZ#3@Ks}|o` zdydpGtp5OB1Zw=!){1q$L-3Smvje;P1${K5qST^?Lncp3?D}b*niB6M{eQUTSN`;+ ztx*wMI5b|3KJ3d8^h?lcCk2xlDqo7pY4$t&Jk`3zKiR%ZXtjD;8T0!803YKBf0xJ4 z{O|t&i$D9HAN^nce@(2k*48>qo5dkPmW#61XQ_bU-zjCI56uh!9mbgGj_2tewlQD6 zy0ZGj6P`g*SYS+=;N8c;;7s|Mh&w(vOVtTMhS;r-rHY!?I_OZw9IJ!tC2nEYPx}>` z%}G++|az8}PXv!H=gMNiz*oT=;5 zr=YP;uzYY8N|W=7!Vl|XJuOo|&qMK^>kc2td2lxZzNM>Z>RkR?=aKab8}S|tpE%;B zxhOl~Ni~Lt>V(xJ64blkz=P*M$Nmz+AL43vjutQomO;8;L^*}}`wXE0vN*gH(w|R# zV}-35?Dq+RXQx&h@H?$rHEM6wFdhQxN&%m=Hq$lf7Vq4->-BHW297IY`B?t|-TjBV 
z{B(a)`GJBP{Qa0a{DeHez1xP){(pv#$H$fy2+tXKRZ7T5aElIg=I@ik2Q&LO zcR@w-^0M+g_}`5EZ_ob#Q~dt`^?pD6dH(?7{{ZlRkNEvx$^3eLjeQqbud{W2sift` zd7VecJ7_Ou1>n%-A_;laOCzstQo- z{$ETdcb2_R==#5LWe9qQ?6(#KD@@=NU-(rtAzYGH^)-wy^{CfQ5_=t!Wcy3p^v;fP z>v}u$_5>Xx8tKwiTg2yH^e4{gdA&4$zB~2R3U*qJeGbc+Q&^!Dt#r`B0Rl>pKcL`! zZ~fwt;yKQs(q6_U*(m7?tOCbWk7irB{5~swdmHQc4-RVSgP%2$fF!Dko+u+pXzQ1x z0a^;36~5&fNPYYAviQXibolR$QG~Qp&eHwS?i;RGx_wW$xzPkj7KZM#*wrzcoDx0lWtN?oP+{2bq~ZYe);@$#58J&uX~>Cx%^ z8YJf-gc0-9z>{it6T!xhh$gaLQq%)M_!d2J8tNN*?uyCcMJH)SrJL?_JR6 z^}7E6m{DQGqMEHe*PQL=+EwoQ*sYN@NGj>RSU&R=^;y65)Yj6jj~O3%aK|)tuFkjP zIy5Od-%&K(M#mJ5MgrPD`BDAMX6^?z>B)xh0Zg=}(x^E?0u zRIeK@Ej73L{wc}loXLV%1i_>?C1S@3fzqZEp0=ya_59gcL}sjVpmo~ZMk^imXkC1Un|dwGOn<$>nK zD%njBJ$3Bn{tNh8DQcNa-?`It-i|#VTORJL_mSkik4H5jH<3)GGR~Rm{)zIxI?C#YQvcl5Hxa|%={jb!39xg5q7jn~q-C+O~k$f$o0?8pkN!<|n`#20{d zGfI0P88DUvu{P}P=oV-GMAbMAU<0MqFI0Dq66A!R&HVZtbvvm}5RU4D%_FZEfW3Iw*#;B;)Hx1bLS zmrAamDtZ|&tJHrjO#+n8IoLW@2##%Df^{ohrsX(q@#v;4{-8y<|Uwp_)*d@x}OFAhPHV*{r5 zrlAh)%gDno@*m6E|ewxR`R;QCI$I8HHztvUJpb zJ%33@(dqS?$oahWc$lrij5HTh?+4bOo|xhG*#7{m!IffW)gQ>IoPMWIpZlQvSkwG> z&w0~LdOTVlr9891`QB;-ol+mWH^}Sgro3l}axwAG`2qm8ar(rO`jwC2Oig&esydQ!806thbpF5l3pW{D}{Ag#+ z{{WAV)m-kLvnZ5O=~!;6V831V3Zh_{`Jb!FXF9ZvWJXkE>1yZ1L)AyFUgt-lsyVsY z=^Icov)|IHWdSVygPSM&Zd2<20C-iH!T$in`z!&mPzx`%b{-;ngd4g*Ox(d(i&>N- zJz0f9z(-N@uFm{(OQmqDWyL;KjCXOB}-cTz8m&yCiCpRj&DLLS2Uc-`S0@CV@A6%6sD1% zR@^Y5wV8iLQr3L2OYl&#g?gj}tbd`*pWZ(LBY=NX+MvS!0Gz{Sc`%Daf9(BM{{SzQ z;p&n90NVPK`~ye-0O!H>7ykesCBLtdEdK!N{{a5y=^fA6y>G2jFnOv{*uGGv6Q3lO z8QAZAJx}Sd+91nv;KhXvbGLs>=eH>80mA7S#+C=#{)imDx}{8$sJH!qKV3V%A_+FK zf=~;Lc_!{fe4VL|SYJmy@u7f`z<#Sq3id4wVCTyJ0G^KMe18uR=0Ph4Tt%(*`}Bh; z=QEQ@wJ-H&kNX&PTpt<{JGG2v;0xFmKPcUL`hKw}Hjii2g$}OZR%-&37^Rj)>-(sx?6PIcv=k7f8A($@l~XWxIRIL;Jn>81Lgk!Eo9UYpwgMid#-MM zr6#W|^YdGlQAU_y+VWB-ErB6lKl?d+cdX^ZW(48vzQx z*T&gCdb0_-I)|T??M`HJfd2q#pRAUR-TwgS56EKt%lUuyqI>cBM#oH|5jzp?N`AvW z-0LLPXN)}@4E8Wp^DndD4-d;7xTENkc#hg4{hzHrOl%2GCj(fZ7DBrv2K}Or=dqHg 
zv0UT=tehXd&&fIceua=p!*R~@=+W-ieL<37>lI<1v5%y~uw_o3f18`j$%fu>`1gN0 zmPq=)xy&=k(xcf4@mtC#a$JT6z~{C$7=-RweOqYst!&yD^QZJ3RMq>1oi$t?n~j?j z2|s_-@@WkC?%0JZoR}VctvA&&2h8o7GbXpGf zxcvz&0ru$T^uHnr__M?GR@FzMd^6K)H|1&{Q5T4Z!5Qzy0D{ye_Ld6P1a0vBesTEb z@#gA_==^>Tawr+2d#A2bsCyH2D4fyD_-y?O`IVhtEkQuPrR;WR>hHz>04!4x`42uW zd~jf0Jp=TQ2GMsiz#64C^n>9Qy&msuqEg@^XG)U3gii(NI~1W5 zz9{^CKCYPkWq^ahBT5qBB_;m=uJv8>B=hJFk3kjq3u#@FDKGubFFa^%Ozfn7fNGiL z)Rza)%zlY;qWrc;uc0#EbNX^iuZ8jQ#fMOm@UfA+($%49A5tA&`e30i^+%j~{{U`} zS@_?6df=ZbV)Yzos$0f0p!r3W&aZPTyu7}0y6Sw>CMkje&SyWW`mkkP1JasTN?tV% zv8g=|CO5{ViY-~X#ghXxv|1bKWgQNQ(%-7m)s z!ZLvqf6qdE(bwIDWjaD>8EBx-8mNW-!C&{&r6{x7DVaKLbvx-jT;Mv!G;w0TZ}A?# zOSM9Ib93ygo}Om7(D8SzNEA2kZ4{j-nHA%h*0FZ# z{mrDpe~f#Y-8!rMg_xhO9dgu|+3#n$*HO~+wFe`l0vu}7&@X)g8Umj`r#~z4>)`8` zC_VoG`!_$W$merwSE^AkmYMxi%qa|xU)-B>TC%Lk*5CnB{7o^?RDlG~w>9p}i6kQY1jW zh3mxLyS!R$lo-MB{dD^O0F~0a;Ua5d#Ye1d*VLgUt-+`ldS=BC`o1V{&0Zrc zJw*~al78?qa=ErNW$CM-+ooWPdQLDjQg2+)vVxL_o-st(qo+xtMZceNA>NO-qKRfh zk5kQyXf;Z3)IV9CR?MW_bzrQm7Ei816EyHgpsVe7K3*jAnjDl5ja3BEOp|nhGCorZ zRMb^MHNmxDYko0$pyV_E0DF`2hf_UJ0ONa0BnvOzmp-V;F&8=p%`C%(AUi8^pOv>4 zXSx3Xk;nN-m*lScjY3B~M+hBK&T?#6N^w&R-d}In?PUat2@04g1+SRHjjS40%;i3bn zw_WwO`&&WbdwRj!C36E3OM!PSRU+oT<899hyHmbBMCyVA7I z_2jsIwNr{0w-<|2I*aJ?VpQ%yl;7BE)@fq2Xud;R7pbRGYw~yym;V4C z6>|BUBD#L3GOqRXbC)2y?jJ(5nM5#*YAE*3kC?Y9(mv8q?>Phtl_cx#_nXT3hK`JKeS?LV;5{D`Bo`=ak(Y&}%7Jdh{SrqwYdwr-9+o-|LtJMt`@sKNUl!c>2d zYF+sQBW$R7t9B}Wu%O-4)b4MG*IxT|32QefdXQ^59#sU!+COj>6^7SarBFwSFNd`X zZW7Z|5Z|jXjQEUY60_6cgaz22k10`A>Qo~*AB7zvsi~ZnqW&Z7Ozm}bdd7dKg|>qA zDr_u=@Sc|*m^vM5BX2t`8IX^G>j|4s@}tvb4LV)UY5}gAbkVgkr2$TpsJX{QeH~WY zjIYz|&7YlUsclK`v+26IR44Y=FqU?yr~^f`P`Ig>E1hgMnyFMlodmtEu8nl7*t*!q zM(>d-c7JB#ibjW^^(mU(r*-|FPl*74B8d?#Cg>EfB~SkVx2Rw9>zBn-HKeijV;hp9 zbLh&To1p0EiB%42z{K`)-z-_?WjMvF`+fzQ8=gW!DE)C6)w1gP(>t-fnl7oer_T+k zhB@}i9l121tuJqv#=epP44>4MQ-;A;+q7Laxt_De8bviJ-#RSMid9`3q2rXf%>EcG z@-A{e9$j)bq1T6=o}zpWe3qosXZN4RvuU(tEb?O9mqqr*gGu_bx;uR>WxY?zakkNp 
z{{Sz1&ME~u#*yZ=-B4lL%`(qmR*m@**Af0p&itwv>ik}}Sl_~=bn(&6zlY1}IbXYK znXW63sXten(krMB8V<@1U`+ZbTE92o^Wx^m%SBgq`6`xNiHbx8aWjS&6csrx(SJ?`rg6PtHv^+L7DdmFh0={t@3v{Y+p{BguZy~keedEA7Z_)uDhqq zq9%KOhA6^;oq3Jc6P7F-eLaRIdQn@Wbm>v9EL~F};J;q6t|^XJ&bI3)(q=u1f1K|1 z3MK9FIWI&X4VroYe``XNdO(tegOcR)_=be$mML06<2aAUlss?6CNHqAzb=kncQs1p z$B|yV>g?6|Gg!0YvhfAYx-=CLy+f$=(VXALj}f+T``(_ZTQ}97L0386_WGAcPUEws z9a6KDVG~76vuh^^pPaCMF^WtaC>!c(kp04Y{8dcJu2B9fl^*SSN?X&i}{qf^1Agh7Iqi%xcMB8I#)7Me-LveDg9GsIl1EO`PS$X zys_p-o(A0gbZ+Nz-B3->p)o-Q172BPp+UNW{(}|yWfc}Rq_if3@o)1IQm$1sPKQfG z+`eS2M{dN!Pri6oozeQK%2Jv(&tKJK(A7SNrOecRP5CNq?rV=d4=&EJLkyh(HvG(e7-ySa&8SFJnZ{@ zh3-DHrl+NKL;$TSy1fZDVw~B}l4`&R5uQV*nK-b7KK!8bEp~9>#zV6qjczx zGP2#2W{pQ39)0u<(>F`0=%3ik=TSJ<)Ok=ebZh$32a0O4lqdfHRzE1d&YyF$JOSI} zY>~2VZ!VE4Cy#?MCuru6J%=<@6D)0Bvgvicz}CVtvG}zg|54L_ZE@4Yg>LxW4BUd!L;1OA+g-bk&P%`Z?+FO-Jf%-d-v4A}T*sA`b^mY#Y@RzwH`Qy$J&_Vt+Fiy{AO)@oZ zHwW2^vHRa*%l`l)tdv#lr}b%a6K==LK3LcT>_*4xdL}~kP}6NRk|G^BKTM3*A1Ye+ zS_r6Nc;FKTt2h0>ON}SHVf_Px06aIs#ML?=0#x>1ZV;p z{nc-!qH+Ue)wSsGclzP-6e6_BaJO&(Q}U@4`7h0 zb^PLqfW7`e0p_p|x8Ob{rKRQ`4aJY^ogdo^Ti_P^f9Y<%#*P^Ns)+8RL0bj+ikLgRMDfT12! 
zRGwrW(eeKPkoF4&fW-XcL-hwXKqSG>L=zMQKTFrUpAC1}8kvq$EhhNXVI{5q0J5*+ zs2!>vUpWbr>KyGsCth!B=3B9DJX>E~8uHH&F*lExc&|4EG2FdXPy}x=bd5`7X3#jb zn;k2>TBe==;Vr8c5BSdk@bMKzGXDU%rXlmBIiLO=J^ui=2j|TH0O$CR&U}1-l_d@P zOwS*kS$TU6pCBWv_{<+N62MU&w|*8_JBB7m%DL=mT^LFx;LZ4`^-XWmAtG32e}Vp6 z_85i+;HvgY@>pSTzPqjBIvCs*TgQ+G<)WHsiut}yF0Q#u!WM(66ANjVXE>fJO%nV7LYA@?7Kc63u zkn+#hOR3XZa=ZTkA-nB!_ZA5FsA-^*pl;^>09NnU3H(UfgCFP&M&Q8g^r`UoQGrfp zkAvGSYGnTa;~4Cn;iP_fx(2~R5B|*gXVTV(*XRlT#A|Z_nZwS-hI^cih~W-G5-MBjhj{DOXmT^ z=CoVqNn;wR>v6mdm*PpFY8s@yAIX{q#5pZzt>4@rq5X33`TX(Zbg$?0 zv$Z=bm56)j_@$Lkk0=m6vJDB+k4uCNt2s;9q}LaB@<;QLv3}_{dBC2w0QH}0kKf8y zDpk?&YNeYtw1(N4fRudsPPEt4=zPIR2koIuDMjZ7tKeByO`x^oP_%tjGwf6WpB~jt zze{`Fnyo8Q_>ROs&>{{T{pj-DUmeU6VMb11`6Ue#&^FX^}zELkG1ikvRBvV*@FmW1{D^JT?O94^y5r$d(N!L{(MWjCywVKS|;fMOVPFG4^tty%K{{YnX^4FNI zhuJ!1XvWGO`&yT=AkSf|DO&xmavslJpOF4P+2u$@&x|bSJ-g%2pPJ`SH0RReSpBlg4#*Kyv?^o!~Ko<4G6KiQC-#&e7~G9U48k(&sDRREw||5OqI}uK79t08%S9Nf3zy z6ofSZgvA`)jQlyDSAp{*r1XP%yy`c3YkR(wzKJW@m(nX;9{8ulu4PiDb^UEAVy~l# z`cp5R1**PbK}Uz?EjTD9&Yx8Hg#OtrvWi6_9t6yz=6}ZB<6lzJ01E-5LPPA(Ac2)* zfGEll;@7dE?$KhTR-tO8wnvWW&QqdDdA#(|D&1K4DM=KqlDs{CXd~Y;vvFd@dl>H= z-=3>&38CiN*D`B%eNR7{AJ4VW{C+DyKD8qQx<2DvqN-}5jH?pXvR`NZqW(|Slv((9 z=2bH2IlLSyy6L;2ar0vL&veL^TKa6RUnPw%l4|VI1gr&9pW`(CNzS4UPdlpFwSLl2PtdB0rq)!(I*Ix-{@n}DQ2ziWg(XmYeTB-E zw*EiieW(84(sSm@YO;{y$3*;$MQmhu(5H^i(7{OIf*nVt=O{+bi4%!q+Lrt z8$=bFYD~R)X#W7(2dnIDYmPR16sU>mH+*}wHQVOw?D05}Ml7|QAH2wyA<d_%dSnwav{V(^SIJh1~-%>3dF`l4_EyRYH93dk>(84R4+> z(>&1$zeUvi0tEK5h?xGIPV$qV`-V=*tLgqj`3e=u zp&s_p4F`z`@>K%VpFjL;QYTmQpmSG7m)Z^ZXOUakEs`~?EG9NqQ@fmu;`=#$5~>4A zpZCFX&&OS3PqW%(^?e}KfNL4L!t!$(ejhArT5dm&5gg3ZJ@~;9A1xx>@-6w$kR9$5 z@bV844L|;XkDh2ZeVg;;@(RzNoiq6V03VKUm(q6Wb7v~~(pCLdf}XLch%R##x!@u4aE?wO`}s$t=P_wMKkOE~niF z^daRbYNu9JbGrQgZ3!xQch^VanJEdZL{;Yh~P9j01{OCDQdiHj&akZh9Mz6H0 zD3c6c&o7k^ueBcq&Hn)2_`KUM*;@SrqO)471gt2R<+~3+o#UUlG?naPmXubUnR0O# z&i4L1G($~0C)()SY`dW4NBdPfCdaJCd=8r(hE8^z;%D(oC*EQz#R2`0nc7+K%mVZw z`$URn`Rkfgo1-#E&ZNJB{{TytszR@)Jb13q2h?pJVsI*1@7H@7A%$+|crMV4(~Niq 
z1N}>92;b2U)0T7pGua*BWL(`fvq`Vxty5u_SS5ZBk2;Hpq@d09QgNnxPwfX z+~1ZK8x~YF1rSaAp3@c1g-)*)Qq~sWEJQr@j%MJj4W+t1*Ab1JPE|BX&*ewX>2{q3 zDTko!bu1dDKS#^wnrcx#U%wvopgyO&o)b{QaC1inH~DH*&phzl`4SS=k#Cx}+P{+D z`-8}Qf3NjVL`x23r9SY{!W|;=#e9#KhP6Sj_)9T^@$>ygSiKS^0Hib)$kx5~QeP`S zmuWPI%&EaOf`LL`=hy4^?~6Mhk4MXvGV60Wom+la{q4c0rvQ4`%9Kr7oh@{}iWob# zQpK@?2PS8FQ|Cu_Y5s9X(rW^2whI)GM=rUSGd=B{RAj1QxLomP)7n?&zI+fGPur86#%6ZKKRVuK9{P?+Ef_8aogXhOeo-IC<#RGZ}%(ad=Y{;ePQpYZW zTF=g>JgTI8eoZAhMB6d}#_!I*9%|qLtDpPVFXu`sHr|BX;J!~Km+jQAZ{M_mR>@5N z0Of0y%QjaRD*c_jj&Fx#z0aN7bx->fb4x*BBB|!|*H!@w66$t0C8}(VooCmO%OeMC zSvHCpyGxgenX-AD{{a0kSXh=(h~9Zp-7NI@H-&WHiT+Jb8=J^3cjPw@528WnJ87mf z2Q4}xu4}ceX*BRz_9{oMbR*C8_&;Cql=HtO?s2;oqMd= ziFQ^B%U*BUwTWXCeO{AzxdfHk)WKJTNF#-*ZEm#Zp5H!n$4}R_KerU(*R`{20a-sl zzZ`4&(VfhiZh*a5{QBeVUeP`@Un4*OW@S1VA=W=xEI*X)x+>S8)KF^cbAtyZHv2Vf zsw@vR$yT&m_K=aqXQ*{*dQpjmFE;}@#Q5tXJ;6t&R*%2#FG%;`5Ki`a?q~l1OLW)s z<)_cdLAlH>Q*Jp0+R+y&)3rN|UWp|3#SI!?G|xtV<5J`xE-c9z?B~xatL(90y?8nl zwR;4NCX&p4$S$`y+3I*%G``j-Yg{z)RgHnj^I|J#S6I8x)mHS()thHXo*xv@>?3Y( zusjmL6Q76-;RMx-DvzB>NiM)2?CI2hY~4K^+^U0u2#1)WK+&t`SiF$EJefZo{J80# zY@}}dd|71e-FMj+eP!$4yv|a*o|+}pO%--L39mW`ag$3~$@u3Xx0@st{{XvH6F{o@ zl)g<_sS&g6?x{^~xW2!)%3txJMPG|uTk7Srj=p)ewy6D5$e)0_%_A~`n3<=Z^{&p%0w4R%sq6` zM<#Sa+cSM-;+f(uM1RQE@8h2f@FOfw3ysPB%F}u)vC<&vM5#SA+g2Z@=GCX_-#S6nc0p`*i`WnTtMO;zR%( zzIQ>Ey0vEgo{Rl2TGfO&qZs`L^YJY9>Art*zpU2gB}q)$C5rwE^sjf%;B=O2!4H)Z zF>KcV0NJV!5C!<<{7)Qa(J%CWfWpg`8Z*$6Asoh`hQV{ z(++%Mbd>BwZ$m^&_+>wOy0J{yza-;wMLgWI6VOYHDWjU{!ky4bI2QR6_XshOdR}*8RCub0?uEW%2!1T9wPR)8yb>j8FdJXpq!2fFkALrhZ$g=y9V})AZqwz^wuGg6UeB^0DM>eae73?rlF8U^xjiaL zA8ygMoRoIi3X0p5pok7t;Z=##ovhi^B;n`&YkgTF{{SBRXKU(mc{9Z*J~glHxyDZD zPb+^Q1TSO`wOfaB6_XnN&x3K@t^%5#Q>}`5JeCvrA%BiT1&(^e%jKonTAG@4wfl=( zH%6Sn4voe#U&-Rt_`r0~uuFcyNjD)K9(?%295?(+MAb?^1~`jW-B_xU!R@}3^wx+V z&QiIj_PQi>Ex{f!7~tMfR4LO|br}|bsmb=Y)GPgOVVIe}3H)z1x6RQEO?TQ67#+8c zkaPWWm>Q>%6t5ah3fCs;p|=Uam-IZ7D+w#J?2$Z>8wx#Vkcf?u6xebf}kp zvXg5*%`+3ZQP(`tDP#5xFJpI1j8!{(s_Y*j6>?{lT{~EPUf-YAnF_TN=hsq}#2{Is 
z{+GXN@pC3#U@0GpgE}RDF0PU%KC=1vrbg!rW@yz0^j=Ju5k|2l3~u{3M??C3b$2-Y z1B$zj${0uC^|+Cxjie2a)V9y&Pgt4a=XDD+AYR*(`%azjAwQGfbp!KdB%%6#g-1nT znQ)PlyFSY!p0F)NX9u_?P=8H)ekU$z`TX9Kt=^gdakiW^dZj4aQzpcjH40{sX?c4X zmUsgFCBK#RPo%Vk#B|Cnd#c>O!77F)Q&~Q;rPd7aP?l!ufREuTXEhcry3`#|GBG4~9*#VRc)xHcNVhZfpU&0sYrl!$Kl zYYC$DDjMD~+p@4VdTBK755qyr{{TvfLo$`H!JC3UIl1d4@5k$fLkZ8s$FbFHOxCWN zT7GKzietS;YEqn*+bvjCD7I~j z?P*Uk(ffZfKseeqV{7m4a=Dk$d-ls{e?Cu^*3}?AgPOlN{8c3C&rasBCsDlJK()@3 z^N+)taQAsC4X=<7S;>4e0HmnVph}E6(czS@X#`U&;;{8RJ$80W{{T_{04tuN^-bH0 zfgU6F^W!>=o0{IB4=O?`8PJ^P{l7)B{m@e}a|xXpGnpu)24w(Ao@e8O zD}VX|VxR3HSxS!+;Rx8|bu?QkJdlH0j zn1*x}5&aZ(aM1t{G(e4Q$l@&o(qb!yWo}pzCU!M~)`R~5Tp!C6$p-PNf5@JOds8J{ zE34b+kWj2Ki9S?(BUkJc2gYRehADd&;hbPT5AK}~#~bomI(sIE?cAkQ-oSIPT6$8p z6xB1Rrf44OpU#yYk!38wr<8pTQaQbDT)e-?euBE}e@RtM6jhfVeizfQT&)0J^j=El z(0qy^hoM^ifqjVKubaVRX_KbPzT*0pE-LgdJC}+2_vreb^T(CLH@>JL{{Y$7le($f z5dF@-KAnnlY{m1V?NysksjNX5FDLS)8glIBx;3)n4xHq6DfV~*0Y|W)fQAj%@h?i7`w%c0A#LNn5$Q0d3;b2IQvAKk84d1czpc?j((>jzp zo*anzJqJd|sn4QG-o@h+TYWN?`iEENnwpoaVTSyAbJOBG<6V6QwySzDU@nRaf7fj| z@|eA8h{bvFD2W8}B&x55|vEqURH18(_?;=e(B|Iwinat>1x(854l=1G))6~;8hDBO!BYtK1JyKThZ?P zoBo*kjRIVxnv)YP$_J#bW@=a+9-U~BITybSeXT?0jqUt%l~JHG_S88unllKmlx6;b9lzc@if4c4x4g z$LG4PZ0M{{Gii2DSJ=O%3_yJJI@EM+%>(vUWM`(3-p^c>%`xRk&ZgG4Op)W6s~2w3 zLc!Ud#x<-*o(GWmPM-%30|@JQi4lBzF>KKOf7#*iW~4EB@$rSYdAdjEpRHD7bqqpu za6PQmhAnDZntnxsA-=88bC1l{dHsi)?4s8H0ONuRE~LEALs1X=&S%d1eO@!6h3w1` z6HZg&Wu80M{D+>@v%SK0{$PC;sUi2d&4tw8&h-H9t07!|xxJXhqOh0y*ZYUq!0o!9 z9%y{JrjR>Y1(-_IYKO@6%rF%$b>m3cp^MTMXcc@%bQ`j_@~*FIXOu#ObSkhUP-v$q z8Q(^TR8v#5Z;nxlU!fK|)2_;tN6~gCpucyx=WTR4rBmdznm$|;hKYMr!AsU+wuND% z%j`ZIJ5mdTDfOY0Vw^ZXF_YDgLf^mFqJlqRPR#gDV+DVqy*}96BAQL-JuFQTH7fGa zv)Mj+Ojq*8P3va-XMubZ30d8vpdV5H0FHLJ+}=CdmbESBmlyIj)!3fM38kf1X{i4I zQcU{?l5#U;KCH$Pe*4)>{U69b6Z|=>ldwGrUr!sZK8m>dnC5f2k5@u%==}JjPk&}c zxW4*kH1q9CkZkDkI4ycm(Q&7;#HwABM|Gbxd(yhBmneTj&)VvNdSYzk4~Ax{otAve z05+p38o!bJigPm$2Q``}ldt~(-PT{P+G4t$mqkj=P51d11HTsYmaD5I8pRCcPL#dS?vwqKOs}yw!-l3Ccs}FCEwQ9?W8t%;m+*h;i5b5hcSJq1* 
zjh0L6_iWtqL(Y(Gy0UQLvS@i$k#agu-g|htZg_N4&!WOl1$#-&z_C#D20y7g^jcMo zH1WF=lVCeK`BKhtwzj+kgKg{vm`q{!G(n?hL6em@%422yw%y*b0!JsCrMw8eGKUvR=-323q?>i zF<46}kAQ8BW`3JjM17xSf@@~K-AR}{+e3cSy@qE&KMn8F`Dezpzdy*2oVB&H*Gr}N z_5T1PFLOPZL|^@vG)FMQ$bZ2SJO|H9^}zgU%$x7q`7bU&VgCS!aTNXVs_#9|CEy|H z#wEFmHCm>839V!)O1wlAvH9Kq02m?xnf$kR{0I;D`7ir({&)WXpVB`beX9MO)`ao& zF8U;mqSj4aJ0>`fqeZ_&v~rXRqMQ{g<XqG};Nh~;`s z%PD0FU(j@vDq`2B>Rr01<>-RBoWIEmTDi~aK&*Sk)Fc2$dIyd39%tv8C%_Zk}MU#~35(yS4cb<=r3JIhInsT3u4k z5I=*OPvc4nmD=g^5G6K!q(?{2J5`n8!_yg5l@L*I#=%`8VVJUY{{j$5lt~3Uk|=6gL~6?4UD?dqHM5 zx;Aqv{i+qX)ImFh*3X|HSr~(rxqw;gC6~x(k^cbkzQFk(KY=05`xof5=ffB)!23N1 z_-~3*toYYcN5S|jKl}auE5h8f^3TcQ`zq=-46qSIEA+FQGm|Q3od@Y0R3}g)(CQf7 zZ4P$*h7q43y!`|Wp4TvtK%x(>P0het=|L&F=b=mjsZ3h8K~#olJXV%Srondl#lA}P zE_s8WGCBS}e?pSL%Guz=u{ZLXb4|10&n_*vKMjsLX@HwT{Bl* zu1p5;{Ojg#$>uyNz7wIo>@WP3)?Bc5*1i7#Yf>txfxiaHVCyx!Ersgmp;Gf=4hZw2 zpT>5bQ$a^qI#dqPm9s7NG^_IR#hUwczQ5)mgc7|inpQ}kG4`@G z#q3AJ8sCXZM^dGn!u%hx;})O0TE{okkpBP~_gTN~3cr_ATJJy@h$)VaJPn&Zsr!hH zNMeAS`sH+&m{lsNXv0pIl+QWZH!&{F0}FX$r6IUMSG1A>wjkIl~? zEd$RIC+6lB?}u*yfancmVT$` zIkh94`c|%GONEt%Q+#b74|Jvhy(ov}OhP9df79B41MMy+E1^R|7#68!nx2YYsp*5R7t!1p%g zXuAQ#Z4nsy_Rj^0S53TWN+icGPH87$Tf0{|gu!02o@h~+hp682=m%%(HoRv;v3=EV z5;FyQmpvtb}D+e71Br9g1YNQ*CneQ^?u+ zuqO6cn`f7#e|=X?YsXSvv}F{Hf`wJjX8O95@|#0HDwfQ-y=UsB=hrvTTK@oU`gq!) 
zpXs4ls9tljlro>9x21RjQJh@E5~NT+Rr&v%t^QIy)G_RbLXsA(H^Yn z@xIcQg)F0+L`3T!uz)s*m+SI)`~8?rl*P~eqNT*Z{B`oiJz9_Y==9LJ+4eleogz(6 zEMi8SEtY5ZJKk!x&WGk-kXhbrjf%@x49{mi4g=15T{@~CNHu)>=%zVDU}S4kDB#M^2(QFACOte=DVB{d3aw=qOZrE1jss@ z5VL0II$3S@y$KfOAQWCaur(5VF1M8~C$HA(OVdHte_lyr;gp=Nab6h?k-7ZIqh`lH zr+;yvNcvdmCCvSsyx}Ra+Ja20T;3+L`5n)2)$At_e7hU$E%^q9gVwg?(Nz@vKgT)u z4Zl#|lK%5NV&q3m)@R>1D9O6-#{+T2B%}+Y59=wX&ipTjJ0r8T)=giLQ~sq>dhrOJ zX%p>N#RE@+9M!YwjM+L-C!F3Nrt-Is&UG>fyse^U<>iH=gXphF88?Hs0)>l5E$w}sM0ihf-pEg9HfMRc?P1@eSe z1@wKlC!BpGgw5x?OCzg)G;64=xs^$F!#vFUHbkz3a(_dPaw@Lgfl5<~%h&NMea{?n zi(lWF8EctLP37>Mz$@N5X~_})0Jkq|XR+bEW0_C%tdm}Y{(L>Sv*|gM2UQo8!F!t> z8@!Q{tVlByw^7VG1Xyn%`eiLLWfqo;o?OmUB*gLC8yl8BDvz~Y;Lx0RmotuvQe@9& z&o(&a3AHqqkP$nJ`3L1`zRCJsZn?S@$Fq4+VXzZF3xn(-#yIGby2jT^!I>RBWnDug z3Sapge2!-`i_R{WFPjAMs;rCcQAI&<9B(;ef75*PF+9b-3{nhWeorQQza!zm>Eq+e z(dqEU*)^YigFP*iUUaUk&6k_zG}ohKdJu! z82nR#zmm=Q4$jNEk%zJ|`gJL4mO5Uw^XL+;Vr4xt>Eue?#VPIbGhHb{r{=qv(%xT5 z`+WI47KXf92HBZTKS1X(#!K!_=c%6TnX6%Oe+m44_uYRl?r-E+s%e=|#-p2Te7q`c z-(s8f9a7+l&L+5ra&+w&weu(Zp-7vGnQ5o-txMKi+8-=yp6x{&if`J7&+j~HMAS%@ z3ye#i8At2!llOBfv%04D{{S73TmIK4n$ibjKOn}+6Do3uK`e4>zbHi1HDx(xg1S9S zs8zEI+Y^H7;LPgG#z>4!)BSq|E_gOomyUhZGZyKO+x5Rt+tlpU)O3G8wUc^QeR)|# z z<)bfOi@--m7aPuyiI6Or0|5~{l-ezi`H$aL&FrH{#M;TijGl!88Q@p`+tXSrk%=if z+_%%mwn>N3`+n)RYu+Hr8||rH*3Q@2&y(eeifD{1gE{vx)pOakxf9Vnj{1J^`sm$U z#-gj~IHyW=vl4+`=l*B;>rj<`bA7qe<6 zAqM`j-6#;+IhFIfRLLUu(nyIHd;N;3tQx^m*y65MdOx{-9`6qm{eB9{nFjH{Bk6T| zZ+ho+T}uYg4y*yy41G}gHfn%p$jCS8`qE;5R%U*eK##2^CNRu7gXJ!Sz_&G6TY~b{ zv3{}E=d6c4g^T{nIm$yhrTCxa9qxA?CQftxekF9Jko{yBER)S=JuuhKU&aU5di7pl zIr?@p`kEYjZb zTF+yqb;4dly+!l_KN0bxP1?tVs}Fau9X{u)QW#EOtztBKQcR1)4UVWqax)k06#LpV zQd)q@TK@nF149ep%vxch?s|Is&M#pVE_Mncsbd{M^(kvbXjfRlHKJaO{Z&y#;4H(f zZTZmc^l5pf+qqv}o|bRN*Yr;4QUZDQ`EnsX4O5LYQiV3hF}*LH`Au=OGdkP(0ovy@ z^=({p@}=-8%j0w!CT#H>&BuX1S?Ss4J?eD9UOa!t>+1ww?`CB+Q%;Z8LIQ~+}4{&R-87Ou<*n|0jzVpMM)z(!|MKWpT#=V{r;V{ zvK++ir_Zvc8FPM;jrD=0UWFlPk>6j?kDo6=@9FEDnujl0?t+2UK2K8?zf*tj_%-;C 
zx8#fY76E(@!y2E(R+D!pgRCO=CTOD{_{8pX$x@V1tQ`Moy&pY^pp+xSjL5ru!--`1{+q z#|Mv)h93U_kDOWI{{YYU{{RR6f9K+`jlB8g?a5BbK&@tNw~NT+4`1a5XW`*RF&G!A zxw%7d*v2cwx)(JQQxT-0>ubKHogL{gRRGTSQH<7g7Wb#=nHSH=O!&wqVp;m?{wc$td`CHfoWobzts#hW=W6e7Sm$=kMiQmPymehFYL_GTziZpdO!l zoHjxQ!;9n1oVmm7eQsYU>*n^}hN&D8pPfY~Eo*7K>U?Svb00Y&{YcZ*&!?}bTUUxn znDXV%&cE>vA1jc^XZ9OuIHaJo=aI~6yyR_gd4lYS6=cxbn!I(q#1=CUZlQ~v;C zJt?wz^PFvZmZx)$rNu(ce_mMd7-v`>^&sUhuR|~Vd>xWuu`Ix~R=}n%ZMKi7D|(*x zDP+7?xq>?H*Wq_X&H}pb#y&?pTkc09S(XLP-SzvCEl;{f;*>gh>D^z)ii0K2zz1ig zY8hzsgQiZmnclCY#}M;+dY`pDFAc?*GaWsT6Pv95jKCr__d@Hz$K92&&Dk?`ZB>CE z(IZy05cHqaeNrKDu6UT~WlCYQYP)r!{ z*QFtLf>kw%>s|s}B)77wsv$y0pFnwARj+#zm8LuuFrP>9HCdR!`s?#nPsOPaa)OEG z=4VSiY_d)%mG{`0GLU-(v|J4OJMl{-M7v>gv+PcucjDiTRn0EgAR92#Y)aon@T@!r=aIggP(GsUf0TJy9t@} zkjhfGqhtNXWWw*p`oDWS5J%939eG(U#4e{R@g9)*(R|OEk@YUmc42~&8A)6Wp{gQe z{{UodlE$@Xn@rvVBR&~P=e2^Qmg3F-062N>_mA^vmV}EoM_*f!#YXdxhDZ{2TwKz! zcSK&jaITRrWgZF;@R6R?NFGdgE=L44552ay|=e_TSW_?1Gc2ouHWf zP51=YBM^#)78w&+Ml=u$pU7~-G=H%C*z6yG_dgJA@)s){TG+X@pYB}%Pd2L0$$Iz@ z4*P3U==eMJ-%+gY~F{%&th&%{& zLQC)^vY6_$Vx_RP(k&GwS0Qy~kMz&t{{V8V^pvb0uElJupi9@EEC_8w{{Uu-p<}pv z=T)sOO+sckRD#Cves5 zY5*i(?rbLh-OuSIUWa{2bD_|xnpO82T|%7Q!@6jezHc!d&wJac96MGUHA|;e{q|DR z*cBIqtusnyiR~G5-mcerdb>W@+rf9!Sbp9j0Q4!8)rbDRxo50JMKziRU^Af}xxsSt zkEXe+OewtOnE_(uA~b^6GJcx?8YmVKs!i|=6{}yO+{EYgiyxlGAwS3gnYyQSy7g20 zw11KP22OuJ3K?e`r0Bzuv2K=N3M^xt63Kvt(GQhZt%|0f$oV|_&0LfBHXpXctkcTp z^kzC_t-0YZjOemevHmO1u8^nVY!g|2P5BoyCO=D}C#_HSRs{Qe)4Yq*kdFro8n%Ab$V}KjK@P zP0W~uD+Bxrp?;zA7bJ~W{~GO-uKm4(^5KJF*aMu{@7HAwym5qKN zO`aL;&CMKLGgwE2p{{j6!!Y<3Y47xiOx^T@4?Vx+gbP8M1FQ=_AzDooHqXc?wKrz1 z)6Umf$B9mtepb8rV!C=N6uTLg%wTNAEo?*-OYH_G?-#W6SH2Ouq1JVDAGuG~((?ja z90E?FF~D{^Jtowb*GXbo3q#a+Uc`&rMtyVt0G59u5EDtY>mOFJG>c7k@-d9LmU1c; zP4G%v*#7`E&S}YktR2=JAfO2MNbz)R?^2eng;KMn_e-4KM>MMc09E~z)NN)wqD$Ma zBC$W%(IgDb9Y!&a)njbq`-B|%1KDolkHafDi`8Dz)+jwDEav%o80Ch z^()@?r2eJEas7*SlbRYnxo8%%ckULcY}B?y>FR#(aY^O<-LhkKPUUuTt1af$wY$fk 
zKTJQabe$A^hr)Qr?Ff5xx>v970QV2ZIOvQy{{W8B9KzjqVdF2CDwM~^jh?!-8F|rd6oPFrr&!dSWAj(-we$G4 zwlmn@Y~*7%)YBRt?b?|yvp?>MwfjV#da8Li3f8-Y+9N#KIsAw@0ao0JdN=nS=x3PN zpSMADEIS9Pzy_oCkH?Nz@8= zU+ctjFq;Za>JXpNbbl&&y31^jps7$^AGcEpX3g=^0UN5JegG|avU#;j2$L>BJit8e z;=2aC>CfM13Q7kxNa|wdZ$%YSux2BZT@9JvT5eqrDrmJB*(7=B^SXbQl+;qW-uXWn zrVz-NMjR|hWl8EpySfypUSpOsStPk)wiF;}o|f`e7e%nQmBa7X^E0O#j0GIn;W)2r zlUUdxwrX-ljWj4~5F<4y3_S;&K;#Tgo>OjVg53RZy>DYkT3>CNuUD^AyP!)fBG1x% zInnFY8e{GL;Zp{~Cq*qyl4=&3SPSud!Lk7&b*X;_4{jTrq04p3Q$4FmYd1nV&qxaHDn=;kOdZc^c<&Os{2n|mQyf=jMmHU z^tIkfXVWTod<@pFZ1dv2t6$%lsN~7dSM4fNu}lt(wL^;it%?^mKH%oc>cM;P$EEv!AqbZs_*9N}M+T0J%Ap8#git>703)EWnR#H(JMz=STI! zS2kJY4pCk1>Q@30us3@sl9t4Ch z!JS1;2mc%QwGY{{SnJ+@^ZwR-yC3UM6CxNG4yM1f1ZhU&`t=9Ugkvx!&&~D_2g> z_N1!RYO6}F6pEPHPbZlDbt(;HsKD;?HECkG5XJB2&AHMv_Fhtps+g~u1~4qCpl+pNo_T`lWd@^F^s2sAW ziEFN!9{O2-)%(94w3IVvm{qw#qAE`kqK{{WG7xo`35PWk%nVA_s%L3*x~+~3IpunIm$qS(Nmi2^4c2vrSB z1<;~cJPcGz&y05b00|nv57@V-Ja;fLS^^p4@zI*mlU5x1+CFS4gV?1TP)vF>88&5J zA*Q_?@be%0SU^=O>-JcmY65Hd+mcPHa#Ghu!0Th8$2?-@ zrXt~0Un;JBT^mk|B!3`|@NZgKdi>F0^dA(ZJ19f2Ei|0fD$Dhrntve~lP5oxudFXv zZctApZf-RH0EgKLQMz@s-c)_d5=iyTpMd5)Ync^YHgH10cF|8SlL=SWrJT={5DP4{ z70dH*GwK6DOV4Xqd--Py%Ezzxx8)qZlpGr9Lw(c!LD1$}rE;#x&}!dfd~1`oe@aQp z6I)R&3P;|dqIx?II&#w`e9u#LYI{Y0KisA=QX@iIfq6F?rVPJTpuePJj2eH&E6d9W zepR_dr{p>1zak=kKDPe=B$xjHb1Ff9S%Dr?hok=h*z^2&{1&K}nqNMD9$DkR=jDEV zPLD_OPC-x3lm(p4mS#2W452>Rx=CVR(4}L|bmi>I)kZZfox-M1=~RDHc|7h-EUcJ| zTsK_zNZxzX?e$Nb^qids2~RWF*O{OfLW^*}OQ?}Pb27)`M>Se3qIa5%2mbmi!tx>0 zKQDTezh9QWpz8ksAWD}c)LHphV#gReYUEr{T}@(ZHnKI}S;XyVeSLO$VqDM{I;M0M znjN~FZ3*_5jT9Ezih(1FvYazr3b5Nhf|Z*=eSKXj6xL3a>5|!PdKNzz-<7G1Qr z!^DWx%C&l{aSkB^4_cTrb-FB`c;c}xK;xMpK|eK+^l!QMnXDt4$J;~^YyJ#?zOm~) z%>yoklt}J;a>unlnqLs3C!IivQsFO}K9BA2)rNXUmIPljsB_ST^EsbEz5bFfl}OPx z$dOD>Vz;T1Z?$O~j$c$KqRQ8`DHxUgil*p86vXb6X^*SyrgApM>E@8=?M;HcU$Gq| z^aq|Zyot7D7oKN5M?2=p+9tPdE8EN!&(^ULxs*!vd6xZMbx;2Q>&>AIuvx1~(xBC# z_|0!WkICU4`UYMYI`F7zD-~rn&O^!l*U79|RSq+B1ynwp?L2d;<^ECIrr2yMAPD_D 
z{&g+pFwGOL%lVN(SexY0`*wv3K&p!T{Il+}OPYQIwcE5xB6vi@BNeTH-p4X>Zd{A%cT zmY8apXp{~2S6dxhA}T|0H2x|ut7E^k5EUF(GdbNnSz~h}t1}xt`%=6`exWaNP?}Z* zijha&rn)g59$2F0qoi*~R9%>%9U!Jgi>cQywA5n>X&`Xsy{aR zEKpPV_gC`{zvUlNR`tPa%*D|^Ha5MJ8P``^TZiC(U((nLJv~UUC)R;yOY*v}>hyYZ z+uZGOa(W-*(CJb~F9(E#15KV5Qr?2{g9$Q4r66yoo2K4$5U1(={;X&~S-Y|#i#ndM z_Z4~t>6}=ib144+k_P!O{s_NQag?ehf1>(D<*H}e`oc}Gty4@eYRei)4^SzfBdm-4 zbk3E95s^>Sf3RjegFn>gIVUhQdc8T&QuF#5N4I{ns~UfY$i+LA@^_7KQ^n$Q`u_m8 zudc&0nAA@hr0Cki%qe5&SEMT>Hr1hm@V$|#M^k!Vll}$PMs@msl72^4aqM2v6~45# zn)-8yE^wQe{)VP=Uq+vzV^np;cQ~PR0-C=fQohPK$v$bUK>%zeO)*j6upq7JiLZ&S zGii&0wNQBUe15+n{EN2}Z{#MA$K>Zl5($dvQXK9?j%^|HTqkSf2G;3l z0?*$=sYFWVb6t1rjOp|{QT8mHJ!-^BmP<4=s)|zse{W%~ro5FiD5Kco<8;35wDjMG zHa0C;FQ4`PljG>}U`RF7&zJJ=oi9dxf!P?#8u4BeDC~Y`8b35;RTLbV9JM~4f7BhD zul4!v&6b6+X^ffC)`5F0U)@ztHoNpS9w+hMTxtBXmnA$|s6o?Y^YagzKQ;1d{6eE*5HZdP{Uw!OvJHIPCGJYY%+;3GyW_GoWCACC{TJ}BkPYPS3^>CC{Nm`pV z0lfMD09?iXJJ4fkPgO(A_I+Qt-wB&MPSvFY=&S1DnjqIT@|umVEs?dc-mag zs-mx_NiXb0C;J}{>CWKNz(A~tzT}BSsWe;hOY`&>bSS~$$JpogA&+vDqDlTOif?mb zYbk|^kC`R(Pz)udzfnj)3F28KJc_C9aWzThveEH>dO)^`} z#*G!E+Nb`mMxg`{cW}6z=<_q`f&69$@zE41ET5nveY9wkY=W%o8 za(>V(sRXQ=-Cr(2ec6g@2j!J%6IQn7Z7X;2NdXV&pF?#%vGVgT6@vHpdjz*YM90Xr zMxF5g0K54A0Ny{QVphn>xYQ|(N5xBic_bcEwPHSzJOT|pE%^EJKxTR9@;UL{ys?hS z`rR|%rO)cGo1EOYmBe!1Rc*OXR&waPa&lT#R!dOPN!p*m{{STXW?nxr{{UCJ#qFq~ zLVo8KSmf!=wqji=F+9p<6MZ{gQ8OJn#HbDN{a4sNiGJZ_gFc)LIFaG*Hczu0zG}f) zD7oswt;Mm56peKf1-lp8o?{YzeDVL zi|qcn#mSaesq)e_GLKZu(Z~H>ap8>kDWRSO%l*1;YQQ~%qQYWR(Q%HQeZIeqG=f0W zID}IDbWq>zDDkCEhs}5yQs{A^PN9oeKj|iV5NN`6T(%R*^)y&}G9mGI+4~eO#m@6` z7`23e{ZXVv9S)PfZsu{4fYjy7RV?PO0bgv+k>A>wcOS%uo^&I|C5qkfT2%H`)BKG3 z--}mq%ggB55OVcBVb^?glO))L&&}t%+Yn`OfQ!RePYEsXo2B@InwxIrQFjxg)1NRT zZ1QwypX{s8KFixm!5BXvvIHe`7yCP=2W8Docd{7y(IvhNZy>Ym4HJhyeVd8b0~q>5 zQs)aYwXrl==vyzJ?x6hxKSU2h5zCMxM@y;) zRKs7me!Q_vzb=JP$~c_ou$ogQkoH*ar&I;AH3%#n;q-qQpRD^iSVTXL*7`o*I<08O z(TRKg>2NkSwP>|q<~2k7J!S^kJ^YKKqxT9urQSJ}#bc;t^(@u)8hMOw)#mZWseVTcrp*;X6nraE?CnM>w_~(v 
z?9ZQF*%vRN>76t!j94`GZj`gjwIM$qb%WNK{4Le9qPI?taf&@Q&NwcI$VCSuiPfz* zZYEdJTXmtXGc`DDyhm**ckL}52 zT@TTb)sY2%xl#Q58=ETeL6c4%Ed_7*t2ZtDnn~)8CT{ZOq)oCHRUT_qR68*!)lL;1 zoXGzG$QkH#!JxbeUUdB#?x~zROPCb1O=+}*ZB}lv6fKu}-oIDADc3brP3&-x(lgb` z?{xcb>h?Jc=o5MOsB7X!`?T8{v2%RD39@7|j~bNK;r*HtujuxRt!q}_tX0_1V$r#) z7FfqeIyLY~6*&)}<12J_25?Zidj*e%{2<7*zAE1E-rs zOsSN)>&63V{{RKKHh!t$cOIWx5y^v@FSQGtHgdXk^*dam>#!>7 zcBOgSoeql*%}cC`?@!sJ-0E(cntCR~+Gyv8h^~r1GcLd6^h?p+FGfypGGoBrvirP+ z$;5ODsHqeG0NiS?pI_1PV2p|ClOw}-*rW0}Z0U3Q5>59x<`=D$?MlA z<5h&5@#c~e#G)htF^}qa$1M}KX}T;q4tG^o*98XD1;pz9H!Xkk9b99wxodBk+3s`&7?HS{i#`Ar?1_JISp)0JQE zY{$ye=-FG;eExJ=^?gNK)c1Pqpy^cH@pnB(o{<7I;;32yTcJv`nbG{(ymt?$%8xx1F^VhuG}AgKtp5N+&%}AY1i;l<8hjeBuMC|D<$Seu z7qBBBTPO9;FQhD7gv3zVsrbX@{z$jSOeI+=9BB$CS4@GFvUVroZpw%O#`SN@Tt36x zKb-w_qUCiQ9?1EwT%TaDcdr9oME?ML=Q4J4a9=T8qd&V+loQYXJm{I-$4rYW?p`U{ zawjaf%IGgC)uD$&;s_mU_E#uDHO{?=K3fSI#5kl0U|fC|+(}~cbVwtpTHEVe05awC z*Y-iXiq@xd+v5KK9xvj9VRLQ1kl|yk_WG;SkD;%*RxvB7vvhQJ67u)X-Js5PM2G!R zZw%yYok11@4VX)#E?ojQ&hXvOmLi!A2Zw@6eqA#@0l&kXo8zUujyL4Ik>vC_DgFS; zAM2@R)Q9kpc}R0A#bQ(shn?iis6l3<{G~EgJgXC|t26$!nQjW*U>BM`b()gJd3$h; zKPC~-1@nt&oBRDx~Pq6hv<)xB)H)WMY# zyz7`bRr6TRa98j8sh0l$t?qpDJXNZ-Mk>aE7wdXojwE4p@Ks+sKOU*bqhdNbAJDZM z5IeE6s`=Jg&c_e-DJv(r;L1bt6jRad&C{dyi>dg|qHT9)#ZTPOpcbyK2lj{O%MRllT$1x)x%RN1}rR=eu&eqsSZdn&^&*vP#WHc zhtRxvW4_Q=LU=l@@yso3TpIJpyc_ZGM`B6IOPHo;YSMB3J2XGGrf!E-#_s{J-A75& z^?CHI06}cGL~Ns(scjQKV8pev;IWHe$8r5X@NCBP`c->yvbLa`Je>MJpio&fpCBt_ z>#1L77yO#7$8fr0eY1LAx-8R{8-Ba;uUW2TbOt{ukkqmJHKyXksf8d0?Bfx&In(E+ zL961qFg1zrFK61BokUgj&23S5HxlNl%EH!;rhVlkZHlelPY3saFK6S5qts?kZQ?UD z+DrDL3C?I}nm}`@1m-XXF59IJtUUh!tsBi0c}}TT_w(fXh{BmF%B_yu9ITOQ*>01e zA#;AkX0CVO{{V{dsZU9SDnFWQmz^F6m5;6NWi%RsL}Ms==a~RopW1t=&tA6fx!3sW z{*tSM>beg^pb05 zBa2N7vzyQ7qCdEO7GqRoQ?Y+`YFO1Tl)ApNYW$b~08pDZO%QaQ(dtPNbcI;HWOnqD zsLb7!J7{pB3}Z!yimsBDQf!IMcN#923JxP}RNEJ^X{O0}mA2olQrl))=6k?Qa5E#O z=hdZ>wArQl`jg+v*VPg}k!;4``Uq_ULbhR4>x!YkU}n(`G8V+;`T9)gcYpFelIbcq 
zJnBq3d)4YkY(+E#)7f?_&mBRnJmpP4$ScKjp=2JfQ&ss0#61kTPQ8|bn|DSiUvyz)hQkgUPXS)~;BX;KVynK|_TPk^^AJyzP1-nZ(z>L(?1mN!@m z76*5j$2k6l9JeXL&kv>)zQO4vem-zM?n*y&1Lb<<&fH!;Ww$>d=G3Pg_3<0cqX*

<%ef>{Qc-r~eYt;9V9adGw=tEw(jLUq*~9W5Gux7ZQWsgfS4LP6}oNlZE9DDoAuO<$LB{P|L)ul%1! zlsBhICMyp=Y&K{3rNd2>QKf?a07Jo`k9;%Fvl%=;bYrJE-+|?hAO6*h z2H++C0Qaa{xR*ZiNA?WMB1e{HV_l-XA3fhztwzivKk@xTr>p+}gp-FiJwza3pBQ7m z<1{~kW?+tIh7f-&`Tjo|AFhW|<`SdjAnB}A@^4VgKF^s$%NYLveg=Ik{{X$sAXa>{ z{(1KDhCs51(7q~V4RU#1UatLHo|cM<_WI=b zZkwy==5++n{i3RAm8xC83=R8dVzO`D-UTxLu;o)Nzm9f}j~~W5Uzh$u&U7|AH>}X( z^K$0&=-gO;?W1Y}Md!H2Gx3ku&7fHHWvvJWi?KP799?&lr4 z`FV%UX8t=v<0X}Gb7O0s)Iy)1qk5Lq*--krxZ*n9XX=}KH|s0ucIBL`W2eGpy^FI( zssmF-ur#Q)x^OUD@cS-Ne8|leOA-g&&em~x=e*d{cUs#8S6f##hq&6fU9A0-$Qn zwH3oh<2@(b&)c)|UU|uhfhk^dK@WQnw@>g6+c~P~U@p5l4?zdqRe>#Wrc(?aSuV`^^x;U75E!a!PJYO8XaoQK>#o9S_t+(68wJL$x9d=q{aI%{TA0KB%x* zi$~|hLxnjt0g4}b2h`Nkf0?4LWj~Jcc(L*kJmnudIgI?48NYNQdhgDX>|?8?{RLRn zS#V5@Z;XmU*y#uw&Z&vhNO<_vw0w+=U;&~N9kZ@2Ad$oMsA$%j0OXj&@r&ej;mbM2Br+x==-@A#IDVb9A*xTvN>$I<>@qX*t22-ZxO)_Q-=v03Od&nWep+6nqk zo26t?bT7B}P-Xytk3f!F7~M2_=Q}GuF$|0vv5YCkWZ+ZqIV+lD=@)uFtQF?GPw{wp zK8H@N&VW9o# z2QCS_Qhh#m33hfD%J$>p=>2p@>#L!E*BOmG)YL$=s(j9U9`=ov0SenyeAzkRueywS zIVOKon893tl-WR?LRC~Aw>MQ&)3mj+3CePm5lhckIp1^DD&ch>HHtrSM?NJvVKpB2 z)g?b;++-shZEu3mwcTaar7F3AM&F5j{-k>Ie#Vw8Tk%h@fJui! 
z*}bXTN?mAmF6N50N_OB~MZ(H{uvo9P{WI3h`4E59+K!(Lz(Du`=xK>*%ure5G#ji+f!E0E9n*G!LiHUW)R{NeGPrGZ4oF(_z^s z+$E*wl&)@QD{6nyIEsFebLGyk(kH(oBiq!liAb-#QJR$^CEJsAy6?nM{IL2WDf^CjS70{^>8* zexY(1DYTE-FXczpYNIey72||f5`%*iGxqv=5oM}?WMI(TB4PksnUlX5A0?#sn^!u3 zam`M%el>j=&3M33dvtKo{hC7D!d91&ui$lyA;MqyJt-?oZs6HoR+7devzhf!A;-my}p%G$oy-P zzuw{c&Za*_bIrXG6*1BdGWyoNcPh2?XZ@~l3!Qo{GS6ashw;AYE1#TeWfFK^f$2>+?Zv8V;DxX`J=V9QhGwWZr;XLIT6yc+>?4j# z1N_krHuN>=@L{c!eY>Mz8( zu`=JuMf7s|0Ke-Y=5t!A=ef;2Tycez9&^35QJF(Iy#c{Dy6a(iU7@&m zwClALocT1?I=OtoB6Dx;Resvjk+w0cEVllH1C~UbCTR4~GQX(Cbk} zKa^e1#di8!^Fn8{odos3oB7=Ps-F`6jV+y)LKK8i3c5qp+NR=K5Oil^YL4d1uKBu$ z8~55Q6JAcy3uj<&v(!#gma4XY^Z57M``>ADK?iqfX*=xN6ufCTi{Le!`ViCoxr_kZ zTni%2fAHR%T>7UDzvB^B6Y`hQ9z9RLu`h?(Q-PuJ9FZT|mi0MH3ln11~Fp5SDM=OxjX!I;`VzNx`FLr`(n>u9f=jLu`~93heQP* zO5wq8(?hZusMO{$?rXN3t#JNTeTn9CIRytFrOtXMot+p;VH80fp3>;$lsWmYU%lxW zD9vCtn%T2SV-D^=GpM% zFzCC@frpYg!V4~+59{{Z6$!^a3TH$R?l$B)OB{{R8q8{{N=*VSa)=H{@#S1Z50 z-g-V>S)n^qgY)1FU>E-YW8czjPcC_z%J$NlJ@q$6$C8SweUE)RjiIy6?%e)4=q4yZ z?r|UE({+<1d}q|_9moo@hIcDfN}@hbRM$9^T)42xu4`$Kx%ROUNz`cm-mK5bdf$zH zH(xLD(%yClHf~iWQydZEEWcsouAXQ%n@Rcfg}SP-Y1eujMn(3zEikw8rN?)$y={2% z_Rvmxbxr>OUS!CJFmQcsdXeeRAlcZ?aFlcq#J_50o*xV5p|sGMO{?eX%>7uI=pSaS zJC);py%b1#JEJ`kN9{vp_R>C7Q8F1zyne7X3(%;IKijiA5?}2k$Z7ffOSK}#H?i}IGaT~wetEtC2DCkVFCM6@h*cp4I zO45H#NDVo=oc2xM7aWqisPn6!hHBPiI`{9y>8T%Jo3btyyX5SlbZE`LsXH8A8lTnf zu>)Ad)dF+1j%^dV@pEL@IsHduMPEQJf^04wr20YqJ3hxwZr9}$7h{UK<>zu9tiDcS zEWiCutaqJ`1jHY>AndkSsCv06`B5yzEsyeU9jE)9&VMuQ_kO`?uUR78a^pxG%cY(5 zmN}+Kpqe@F5oG$m)t{MuV|5B--UA#_RK2&Me}S@2T6LoN%DPQV-|9<~G~oMa+(gYJ z^n1OndU^dy?ztS?b3my@RKA}U@A1^bNNHyrSCVuZB_)!lQQ#ld)dhURj#j=NsItu4xpIkxEWqO7HOJ*_deVhLI!J3d_0F$D)Utxxl5x>nU+ms>-;68{Q|9uU z)8%B%9+kary&Vv)j(~cbVBXRWXTHv&hwZ8O;d}StWF0QzH=OwAwaMls_WtX@k`}P- zIZ^L^QeA0K6gH;Eq<~_i!4d>Kb&CrX3fMnFYV5~7Xz>g$dq-QM(=6X62H+a;Tj~BK zx_KWmYG`wi0@qiwX%dnphsTH{71G zKO?WQGq#BE49lNaZ9d;uR@_|twI|-vRMJ9S16T&eP{MIl;Jzo{DLyOc6AThd%ILBj zLx-rh6TO2xZghh;yy=`O9XH_w*$* 
zQ>~&6ODAONK`;`O>q?fcO-h-j1y>WT&zFNnq2SN1}m+njHRRT`8=7>Ura(j zvu@(oRI11nlT*3KEcQFmo{2pyv^oC(nD=St+1~0dF#WTgX3ZZBUU_V@VoB_!&lb{k zKW%T;XH%t1CLO2*zfJ*LbQ|ykjRmanZ21;=pMj6}hUTqVgX$gd9Tz0AJ&Vz!XG@jP z{k*W4!gaBW=|m5WW9Oa<=SZBPKzTRDg{~lJ*5*yl?WdvBMLkOAm-hFuzgq(fLMXZP zWTd-yqBe~F44DQh7ap&D4S(e`eo)-%{yM99QgitL4Vqx3FmwA?04PD%zM`fOg_o&8 zXEbm+Ub!6ESzz;F-w7q=XF4h2$~N?_q^r;CUgi3=%M$sPS6aVW9!oCnzsS!%!AfVJ zJWi2!vdLRLpN|5rM8_Ma*}rd|lB!j7vB1j8DH&>|)W~69T6R_&+5E@2(C89BWMTd= zivh{rT2__{rY?5k(^PCFbXYPws)zM5%4VExIU25p{ymiRI=z0UvpN0=N9}t)%9V?H ziQ1>OA(kJGTzMp?3L14HafM6`dyYDMa65Di9lfRnHEjq=h$DXH(*GzG0jn>3Qt?TR+KyKX~SK zcyeVJk}l6SxM!jN01RT{o~J7(p|8a5V5Ou?51&S!=y@09(sw3K zd6Ig?ne_adl+4`S_O;CUuCGw}FW*g1J;MIidVNFsTkdO9H$7B1m|8jOf3@B#4;Op9 z%50DW&20MwewAm(_$%YZ`UNXP(ev;QczaqDG*AR$m8`ZugkzoR)|M44!W>nf$Q1N@RWv$-IZ;^9J z^05us_v}h6#ury(;B=*^?(Ne|ugcwv4o+Kgv% zKOE^#)wYvMRdU+rHC)}*;sc$NGIB)sCOvTr7&}zhO4kH*MYFv<0mV}*v)oAeX$EuN zkj-bcBE8J_kE!3)XU?suFVJ$qtFAT96EABgI>gQyixuU2{jSvp279afbe`ss)NI}J z-+3-HhL@SSi}>;L-}22boKSe~2pNulk1qoM06Fo`{CXKbZRb(aPtoaCr1TluZ>j1E zm6=v45b6p0G;^`{Gut87bF!?F7I0^pVr8I&ZPIfpZJ#zpuGOQ#q@-1$_Ptt2H;n5l zmFs8?37evR137@Io>JHIVA-CcY}YlcDpEuK0cmzL>6PB}8W!aIXZN#esuDqM!c&14s*q&wG{8pPU^30F)`;m7lG{FW$pA=E|iIqzisI=*FF^E1=#lbh9#=-2rF05#=Q_>Z?W zy^h?!Q1{pDT=~4%5<(QX+xAk&;&FZlO})>@wf_LP@HrL1(~FiBKi6HRN&9>sVhNM- z7w@*8_LU(~CN5|}OC^G0cNg*gKWDP;e>*JRbuxu&+b&mD>Lk06;nFK(B^z?O-rdrsi&G0n2CrO?quC=s(4d*5 z=MtR0WVb0OWe>8;+Bht|JrBtGvm^IZb(y316YKR^%;sBFb~idG`oFoiB)g>>Qgp64 zzN;9Fpr-w~8OHwrs;??1`vkb+@?yF0%%wWbo>`5@;<^}sO+?2!no=GVP|RBV9}wv> zY%K*m#8>hj+*p7sN9A8ThwwiOA@T4f4*vk3(#zAMr0gyK0O;lqr2NlIJ~WHt{15s6 z0G!{C@tz04{4Mu%rim(lk=UplC5Vb5q$ zuQs=>4K^Ip=Qv79^P|h>ix`{;nf_Y)Gp}DyR9fKt7VXMx2~3`KI9+U zHvYd3yOJ9iq&~LoD335B;ik%{K)O8 z)oni8Em7++sr2j`67x_3bDcujluX^RN4vOES9)^J&tbi##50+>x3Puxji#MX*H&v6 zgQmhj2!zRC{ObND6|<$6@+{PJ1oJ+L-#Ml?*}rW0$@cST=XoAZ+B8=ojLNGDU$+?# zB-=!%>l9d!ypE@u8%u+gwx@aQluTZwgL*vFd9A-D3lFCQ;=O~@U#t~b3W4_cs*l`G znb=fHpF5Qxd6Ag-)$rLDvVEm*?xzawTeK!;ZHo2htHpu!A4zx!eUfv2PK7_WDLRfyDuSF|vg4=K 
zL_0Jd6jk+8(wr!ei{)ckG#|~vM?NlVc0;Zg&4=QdKA0-^kVWiO-W!a6S_6^Q9-j@{{Y*Cj4;3U-`&rp^nKjq zRUgS&&@i4@0GBzl2N`oDouoy+OD`3n_(;ar`39;O7q7;WXQ6>w=bcVTs2Lg$*g$E@ zu-jTS^sC3#9@7`3dzFg^ zrhX%Q#rXH+KaQbD>P+@dlw8GTGLFm@2`N=)N}{g2MR|=|4<6P~hAzjIoeuOrPGt)= zg!uH9EGeGAFNjL55-(G=3O@%hSH8b5kS%ZI9=zuCe=0(pL(n48-YO}ny#7}~F80n{ zYk{A7r>2c(H|j^oiCQ6dvPl;2g}3(NC}a}%tz8h)n-Lesm^a-xDuk`H`odwQNd-I2 zUGDUAtctF!s8g1|VEVb_xaXr!<$%xJHbcFY2dru%OFmr8(rq11tqPVXWOZ(_pVldz zN+&gi{{T8Tq;Asz&$PetX)&CXwK=HdatR0ye4P;%67t)4UrVD_ zJK=gY(g46|Pwf1)zHB8YnyDeA4QqV&+{mPg&*z@prY?c~g?e1i;7rWK?zOp!7Z)?} z@5d;MtJF+|sc%=T!xBQx!K6VNK8CSL{{WSjtoI$cgqszytJBQp7VuD%tJ^0{3Dohw zXBK9zdp57HJiizzJ!jXe%0|2hJAaWY@s|5rG>o5>Z&Q-(y1;7E)$r4-s6EB?~3tCbjz+sQ%-&gCi?Mu=1 zG2206XX#XzQB(bB->Zma zWwbKhYI!WOX0fY(N1QNpx*Y5Ns{+L3l~NXOO?atd{m*yfo#C8UJs(5OS2FQmym#tE zHJ5`jCYd;kJ)YfLb^E(O&frj0n*btmG{M!v8g~G zy-o39u*>;W-J`;Y4olK%i2M&YH`;=nh9OeGNW~K=m$;?>QSW&n^`GC0F%|u;=ZIN{-YH)O;_< zIsB;lh+E0FPtUAnreeh8cXKVS_R8S)(jQ~rAnL#EqIpYKnptyQRz#c6D)(po-^zrx zg)P98vp|eii3)V8UrM|-h1U8vE2b)Ja(=HXh{gSubSM{u{{Xm5;n~*$VhR@BsqzA5 zYcg-W8_8-WbxD8q0e36evi-`^o$^a-up*kDwy`apK|7~ynT&~hDN@hx%G|uNzH9PM zdbaycmDv{@z+l?)BxG09Acv*N;gt8 zQ}aUDe&?UB+5ATk*^9=tOo_Pmx2Z04bSRW=`?I+HQiv*mYgSn0GHzPXP0z9JmK;$T zsxMGEsNYEplD*l=|)cZA= zf%`CY{{ZSc?CrSI%m8w~auMiYs&m@>LS;K(QZe7(kdsrbQ7_M^j&DMqZz_iEOB)xR zK%SNlu$ws4)jlNcNCF6B=wy>5{Cx-v3B!K{IcMuOLuOldJ>N+R5lr-Z=|MzT+MA`5 zQ~ur<^gq|7f?muXmvYXMKOg7OKR1uGT;;9j%b6^zq5zx*FZWhhScz5)yFRbg^A<47 zIrsEoZEWMg)1f>bxsK%b%mt|S9e|{yeDT1X-$E4DKl+5QzmB_WNDr`y<~!}spnePa zVtk9(MgDg`$Iq9?&Ut6+9oD^gfHfv>{9<){1q@*S01P%^jLRxCx50QQX6}4=m+Qaz zhbOH^=Xa)@XFTL|Be?GXQH>0e;*_a#TQaFVPZ7k6Ql#R$arEU&uMhlb_2r z6%3Zada0v!erh%+`|!1-o znV5g!3zXQepJJ^vwMIXu+4*-`KKF2!?y7ALc0oFKklPr%;x?` z@d;6aDa9?SN`3@Ka8`dQPGIk#H%Hvz5MRc7Jic^Ew0=Px_=_FuttZLwTXo6FcD{@M z0F7TMjzsl!}m3! 
z4MBR0I^6pLdKAaiKCST?ub&<^EJria=qH>XJb*NStQu&>P^0w_qZxe4jQ*V%nv?bc zb5^LuYWijH;{~mSOwVj%{#e{pLqp2m43xb;;m237lVWBRL|15eF?wV3^Se%X_efPY8Cew-_T_JitCRE;OSr@m(64{bS4u|mqwejE7LADwbNmxW$xpf~>jZ_anu zX*W76!*Tt;v}tGC1*JpcXf0-9kCAGx`$PEu0N!WTI6MLQRHqVu?dbZE>0>_AK2X#5 zAT?|+k3ZKFgX&Xa?EX#op9=i-=B*LY+K_3WgZ|b|Uc5mmSR0`4>u8Ao0D|XpTAb>4 z3+aZAuh^JB=BE6HVP;W=sJ(v!?8+UXyR#REXQ16XddgBf7fTjlYU!@124ihUftis_ zDrM7PDmATt@U;f6K2H+WUC1xS&YtQD2d4pDxP}RMtNY23k8G4Wd5Wt_!OGfm5-pYw; znVtzHs@f={ekjD8)OeG>8|eG_ABrdg>*HslVf=>Fx@R~^)UJG_IgNS;>P`K5PmOMA zXw8y!c%5!nPMBr4q(6@j#cGg4O>E$R6M8rBVz_I-StOTb4|LpdKi@`@+tDT(*j z4=TjrPvY!q7(HqPq@X5m?f!av&ku2}er{6$w#F@-_pX(sZ|Am}ZcXa=_6>4VoVYR? zVK#kQ1t-^8d0et%1#>e$FXXq9JURaW4G6xcGMDRGH$!`ju$H`2wQ)dkgs zk29!r`ttr-?EII5Pd__x1*_AanQgZ@Ql%Nez9ho6n?rNrtOSnN4V77xoYIn)uPl`|1-U+~3C6`>u0; zZ=bdk*0UE}9JsnRqutf5@dD;_)AQ7SW}s@fr+F;D9bZkKdKx)Xa*#4ttLJ$+0HlAz zDdnQF_12ad({W>z%Cy(a{9p0DcOD;UIUU~qw5X}RL5h9K=bZSzv0v~<#T8yKf#<92 z0+iNtKN|h_vnMB>kayQQ`VfcS=^Njp%rIg`RQiVu_rSfTgYgzUHd(x{$))^+=54O1pwfzVFpFuo`%bd(NZZZqJ|a_rXIPZ1AgU)WX-j(1AdO`+@+ zgxNnc-RxVLpDXA$J#iHia(lelPbJDHXQbX`=<$6Z&RcJxdA~OyIP`@GU#r-`6c?m4 z>y;`*RQlhUmKw8AhW`Lwmi+IDc(Z*CBf?$-P|X%FSOB=^u~WO1_FY>{#dnh#QC&%@ zCwnM%RC9v`pdK2$#0e&qdS~c&ZxjmsKXOFk*YXyue5U&bw1$}IO)DKidwpKWvPJL8 zm|u=|jmw(J`X9Aw$v+S6D&TPpz<=8DIKCN2{sy5rRe|X?UM9Y+9Mb3hIgVRp=X82{ z>K8)>V*6Ua1Ce~m%W&nU$LCl*DZGB0XHJ+`Z|0`XgS+%}<(Kb7bb#&9}aqvll7lBrfSO zYY|!Dew*Lwy&VfqG;(?;)x)aA;j_{=!*G+l6-Wx#RC<;B)RhR)B-8qtFB8(4Uh|^7 zMB0J@Rkgl_yB4qVuy~I8u)#zFj7NZC{dvESek1tzRMQL*^I7>9AD-K3rYdKB6`?mr zNJ_4Iiq^{6ETBthL-#Y={Bs*eBe`weqn|gNe7u+WI#o8rL&^Z-#WAhK^`F|K7eU7w z6m$z#kJlfE`4CqXz`-&S+_YU(zLm%CB(eh4b9lh!ylGUK;>Q&7X?@wH!) z_W|>EIqFk9Yk=rY(MQ}{nb79DnziKerL@g-gfQig&3e40u4>u&1s!k5p?vz@S5jJz zQk87*cv0JeoHZvz2UcSlIr`PR;NSAU*I%7R3za(`ktGiw1?RC# zl@5?Py;7on66w{gOnwKRSnqGj{W1owgBIqCT2s#N;X@5w>N)s)N~7@_o&)WKTR9~2cppS!W0LdWi6v{&j%d=Z$D z^m`{`7SAuz$+E=wlS~Zo>7S-*Jr+*QL$?uNZ0*If`b{oVTZ#k1`5YE-zYOI;*Xw6? 
zzMJ37nwc|$6_eFE2^6|$AnAPuv(&#QzRR$3FWcX#7H4AYrk5{zvZi@nribn~OcRY; zszmDdwQIPTh99@0$i=?rzkI4^PRBi)S3h&v&d@q+n2FV4w0W}S6rEJ&YSoBOm#bQ! z-|+4IaN+%ftN8R1EpYOCH1gH9_I>(HyK<9TB4Q!hXCQ+rv6G8zteq?RCaL~K5X{<=pisYKxJpIIU78Rwt=Lc^2yI{ih5oc8W{PDm!3Fv9>B zgwqbMFB%;ubX7fFd(-|$@!lM7l*KZD6MizwhJ*U6VEoLASx98hOIh!hwDo^%lP}-9 z?+24&6;f!8@X;_4df|MAQ1o0HK@vhE7eEwmywz)?bz!?duLC*1uT%A)>4irgvDDub*Ln zPS^hct?|6@>2$`2ix>b$z&cK zEur|SPn;u;GO4Gl%ut8}_uIH0YFX}U02!7)c~0vtFW$RQc36z2$Tc6z_e}G8>AMw` z545JMW)LsiNiD>iazXWi=2FZ?ebc3Fp+rmC-n4A9nUf5)A?wUYd2V4B_OUM7k<97z z;&uN3>))uNhBSdm_m#{6@JrbRe_KMA49!f6rbHHjaMt^I_I8sSXtBG{_RAkMl%rhk zH?y7gyUE%XA5l-Br9$u$nsBewpJIXcQNo(ug~4+tm9QLBtZS9Sq+RpN7Nk-r!%F`pLuDmGx&0u8?)J{ zooQTOVs%l@p&;ik{-u6DT-0{yto`GKKnj{O6MHL@kxGHOjpNb&BfP@91&n(V0cu zr=C`;rXsFt@e^tP07SCQI-c1?RhrVw#Y^m8lNb2&u@iP$-OwFA5$oy?99*;k$n!438-6{ce$z)r}nK*Tl?e|kP^zI=Gk?}%@ zW0^}7h^C{PK1I;{LCCg=QY@jgrsHmWqE`LtU{s*Q@;v_l+?LvJ@f8oUkuShE{G^^q({WvbHI2kOG&}=3VM9R z)Ay$NG%U=c54zv4>H^OysSo_1OJinR!~_ZKOva>wN9o7wpMUHe{?@-9E#1$SWUAl( zGj&QOtbkS`UPJyqGd@4gZ~p+( zh2@#^g-;8eo7(Gn_fm${9C$7WTv*;O#cAq;x2PuOkk4UA>r$1Gm|xkS8t#B z7qu{x?@NBP{GOiF&ut|#+PKWnt*UUNQse!-B$b_8`sBBf(Wbx2yGu~W+Svi~Q_Fl! 
z56C)g)hs$UIs+^s*NkGko+I^SVFU#q_vqoF|D*SBcM#1qjvC^{=`3r_(oV*db+ zDdw9bGSK%Tgg@2l{S92_v-7O)?B9Uyj1)^RuF4>b5vFn{BWO|1 zr^7GkO;RhdDl@Ka!lqc4F{!~bj;LV#yyB6Z22IYYHZv&%M$nP{GaqmVt84iWY0R$= zR57M)uw|0AbFBPpdVi5e%)j?bdQZx>9%}2$Lr~psg;i6>`R4sq^U{BAGng3EREAJ$ zrVi;>`6!bX-fbvv9h(5-mRsW|8_Vb6%>#Pha_8 z@n5_=4^BXSTpx^nQ$$d%=wU#08ix55g4Z*g7py8u99S(ca+WGHoh=g?)?6p`Oj94o!ySaW^DI1iyJz{&JJ%HH?Y}DMe@v_Rw-uXNsx|(?){B_gtNVsYtGez>mqWLG)#t?^l@2qysEYt{{SJSJ)g9fEO`;Z zYt+-BbP3Fyx%Raxa}++?3e& zAbL8`wE7cq`k#*UxgE}5;^+H~JgYP7^tN`&0FTsy45`HIuJj&bKU+It?>3~1KCE=Kh!grO0I`Lo!090wj%oL ziX!4iVyLA~zXFrF!cJe@(Q2uAC4Jhmu(VYc9H z9d>zZnEso6XH_p|`ID$?JU&laUpe5s-xZEyKM0n%^S#vij?)J)3x_;zP1(_MIYxnc(K7bh3{{U@Ev0&x7!GJ2E+L;1%;4}S=12+e(+9bly z2tH)wa$3Hhc2T8sx3}uDb-q~N39hb3GuP*U15Wv8p%bJDgy^@E%huMw%Q;_DT5iav zIeGNXD#uTgs1)0K6zYn$Qi{{$V~4w=tzC(CKc?Ry41Ud{^r%m@Z4uWlz)@<+N%9<9 zp2BKf{mDZ7ZH&+BThV(I>SvT|Wp9-#!WabDUf*k2;}FcKzT0B)OT4IJ>A_poMJJn$ zm}pB9%|atb+?O8b1496Z)07wSA~7&XT>iQdNBYx3kySjdVJFnAVKc}4-S4LTS$ax4 zG8_ZicLrkpb!$xGyF$-gmppdD)|Q9aOxs8RS@mTpkw@AsS$&l!W+O&ZFK#wz33&1K3LiV2*;|MkAO$%_)QTti=wwRYuG5879w97)s-M7q&)$Sq3d(VodRfr> zen*(*Rc1QKrowQ_n;-Qxrd#?C#Q0nQgW)LY{`cP?f|63F zpxs}k3e(z@6>lzzg@SUl=1`On08 zkp?P%SJ?ZTz9mjoG^I`+()Fc8KCvB+Jrw7+&&#Szi>Vq)s{a6OmMTC&FjCgCaP#RH z(!uo_k)Fmh&k0_@FP%JXg+d)2xhLTjvF(6c=iRVAA8$%A8LLc@GI2wrDzg{~-{{SS) zazF8Tm=>hIqcbqPh&TS+?sI=RpE;k9em`({@r9_WDd4UoJByMx+Ddv`_ICc2+n!A_ zliZ}Fq?4D;{6>9GqF#S5to1$AhoW}zJ+ijowu88qh`e~uMO71PM-^>Uag=0Awh^~49T zQ$EbV_Nfa!Bb3P*PtUkNlRcj824i^fGofNR_jUQxE;+4Qmp3Zxk>6YEiHDsm9l8Gi z$o~M93Uwv-X@9obnZo>~krv^&zO5|JqfeyzNl?@Z6%hSzm4Nws*H71z;p|$Af}RgY znPK<(G*O@K6%olak@%^Vu4ap9@HZHZ2tfnb;=ZXZ#@hh@VDym!tlCp+!#zpZslfI&E3Mntq>LiYt!6 zQ!Zhbi|IctT1h02k5K8BYM18m-;>HUfLbf)^BmlhT9#Qm=p?DJ(K2>~+EpyQ3|a_G zsP_~}Y6XZrQn%h66?NV;SXmaZ)+Lf}{@*RGPpdqQ!hx&Q&t{6=(ark)0XgKupHg}c zpG5MsmcK#>4jpq;iPjPrkV@D*@n~@`rpEoU_0^3YklG)(-pA)&Xe#yzrAKSu})k4qKD6ezGn#TVB0G(kDw(*;f zsZh#}N*&6KdK6=k>Tg3*_0;L*zbf&}hYQK#BQ~k9IOcT)(2m!9`J#s#+N^QomOcW> 
zfKtV434iOOO((2w8n3)&*6-vT%0T*8WV3VeQS|Jv1!0S8S6uwYQ_q!B65Q!&=d31@ zYPtw2a#hl^hPw}DS`@7uDzPmj%mIvcHz!At4gv}JMtb8aQ@@cZ{{RZ>WlWJ%nK1eC z{1YXT>S|gEK7yQnz{;=U;}m+=EXoy{{;xWoqMYQ^^>d`6y6Ir&pdDw?3fhpP>thom zMQFjD9<&sSoI%hl-Oh8OU)1sFdYpBZKgISPE3P)`zNrdBB+97jZ*$eolCkl zL3b>05Rp9S+h195 z^4!#Z*1nTdDXwi&8aryyT{Y~-<=`wV{dQXC2V1itbfqZRrW%*ao1@{tv9xpfp+s`@ zWLG&!qv_G?ru_s(F|N@Ub$dBS53xV589~)JsuXnzods}8HMNLLyAsV4$@1F(X9h9< z0Cqhj{v&nujEj;vexi9Su0xc1QE%0}x(loBms>gKlh$)J0)eKaLpOofx9`j8s#qOd zNl`lw)zA0%u@bCwWJowi`)WS}eo+~dQcnh&3M~}_{eY!%U0^jmdY|Cc+E9YwNT<5ijlX*Dv zJ#o=fzisGLOfn*9oToZ{dz<5{=y-gdt8rWT@p6`m`Q22*!F21?eX4oz`}vf05U=q0 zSJ{@C{e!7z)Rqy-a~X)UVs)B^I-y>s>zY!Xq=5#rzKV(glRZ zZ{!jL3FTaKN!4HD{lZO5vFIqND1WxI>sP^b;KwzqK#rR+|$2ZFZeI?O^g)xS6ap|di!n%Rn zq4m4RDJe9Tz{Zb(daxeX=h+db7JZ+`>ly-tH>jEVV0V9Fz!fx0CM45Rzlt!6XVND6 z)#`tnSD&d_2xy^@1J%C8>&MxL?#BSyOyuOSz1ux-#B_8}swlV;;kI*NSuc zji9oXqZqI4klM=I)ZAW|KL=NyTQXJl81`wbeai(0h^m7AUp^V0WBNjNB0fb&nT!}` z9p+muwIeCKYp#SF6yNOmf5qM#rR9cGqFLs`a+|*6oFhpYt=_JFs9))&bCaldqIa|5 zigvP*@Ts1^)d_`m+`^rH_AuxcDU^m)Tk{wceUE1Q755y+X{Zpz>R93cmL3|GHHSwD z_nhqhoLXEd=YtKO3PmxWAE&>4MdIu_l@asZL)AQw{m(+32Fpja`Oow&TD~)W zvQ!?LS3A;1=dy<>spqc;KIJFHXLGZKzWkiBhTGA&GmsSFaeK)!iOyk{hQ{x9X75(S@Gn(a?L|oqI}D(AU#f z*r9Gg=lpo&>JFfCXx28!nUeZkFPJO^m(yCuaj6V$poL>co{#rCP~RLUcE1_m^y^Pc z9GvGqcxl!{)q>?cw8KbxkXBAJzwK0-@Ns_dWtuJiec#8ObgSoc?9el<16rj?HJG#^ zQIJsQx6XeC^!g4&9$WXKx}#FE56E~uUWNI_T@UDfCt^{N_NQnN(@gm^>9o3tn`y|t z=~<4zDS6=|>8$I6ulV(B9>h!rsZ4Vbx2IEQKcmFyISXwI+9v395~>upHhP#0&@b*S z>c{xcB1d-gtmoIu>2jmm(K-Gg`$w0SBl$hfy* z@i?JQ`0LBfbklk4y=M#gWSJ`W&cmJ~nZ6oKk$d{+mp`-gl6JAJ=hU2ZJo>(UO^gX$ z5n@S=4D?9PbCreQZ$A+B@BW~EGdwoi8iI9kBft}66Z_jkey>1O=Fr!)hYmK?s?|Do;|6?N0)+snNSIl1)sOCPmbPRO!Y0E?3EksYO65++9z}sQ&<8o6xD* zJal>wsvP{*%`zU9F^iSz=bE|Yheqo0I9a}0C;I$a8Ck>e$v?uU?q5lB3YN zM}A-b08jqqR~GNay7G@Eo^3Lr91h1R-DevNQPY;2dCGs@jnY$CImp7_gt2NEKOE+C z{dylPyQRG3&pSR=#xKTBk4^sZ91Jn^lt+xO7N^cSzL#^T_H#<854-oT17aHV62&R{iki9ST@&IZj)Onn?Pv0qk!_=8R*WurC8+gAxul?R 
z6Zn??Aj)c!x!32{!t7c>{{YtOvz@cF=18ex!5Viyk1k15;T8$d;#|&q7sfuU-3S?s zZ#$(NhLE@A%>Mvcif@6grLz#tSJ-j}deUmI`ECJjy)q}6J^dnEbdWD#Eqt{Po-O{D z3p#4tFwHC+^8)e48tUrS2uIW^dUSNciGG^-t4iDpYgP1-x%JiSR9d(H0BEVgqV|7Z zN4T4c6?ev31DnvAmN6(PBVIC!{?2UoQ|MJcS&@oXVLa^>;xq@7`3Y956LoLo^<~kD z`1vcV%l47!>v8_veG8VQm(Cz4Pwng=DrVbb{JV@Qq!TYGo#@)6Vs@JJCq+zlX$jEY zl*Hbrp#YzsSysV5ligPX2dDc})76`$l9A9zv6ao$B!y6}kl%jQ&ap~j3w0Cy%oTjJ zKTqSYjnclazCyN2sj8Ff{^Gq`dL2YZ&ysoABT};Z@^7y>eR@=^1nXUYf1&v20(NI6 zzL_Q>?Oxy5L!QB9b=*=*xJl*#-AmbQ%JKgIUTjfO?)d2v%l`mO`8_B)e~+z_Q!U?+ z6x^R!u$y;m`wWS7rBnf|)L_~>hAh<>j+k+UX4%$G_X(lm=PZWJx` zCzw=;F$r4Ujak_BrK^AOV3zsk`QP)u=j4n30IR?K-}CeH^Zx+OewXURFQLzh+~>&E zwWz7*$-mc*ck$Sz@J9ata+yi3!U2;Pv)R8lAv3cJbHz060%M>2@0t;d-}xV3_I&>U z^>;LXPQKIeR6t}u1>)wH&U&Sv%<+2DXV0Vu2m4xNd%2n6QxvZRn@a$Nf7ZPn{%BJ) zM}2E)Ff%hhm;CN$>he~HnRe(R!XeyBhnLLpVF&Wj#uu32kdC+Oo`1*kAI15kznH81 zzmq>tSYAHa*(Yk1kDiul{?ABdlL>71585Fe(`I}|h?tdx__1&&7x9Iny(JI^{{UaW z7s`h>hnheU+O2AL48s-?)akLiVsiL7if$6FHkf z8HFUwdT0s+rY5H`D&IX(VhIz_D-S}F=R5V&E^enAh@F49PWWtP6Hs}8x{*L1lI+rP z{Xixo&)n~1$(aZ$Dv5Wc>*ICy>Z(iCD9eeSgb&!Nrp|!*(e~u(GN|gAzre?sC(r1$ zCv3~1G%=qgnNZ_|n6WJ%jLgCD6VUO({y!hX=UdHL32)zF3nwMu7x)yY5|H0Pc>Qi|aJ5&t>zECyH_XA+F<6$5KoLl813n(0EMKs2vu&TX zQYq>bvGQQrJ|T0HL0a!>{cq?qXH5RWM9xO4lJxJi2?+gIwzbphv9(Bqn!f8Nh?c6K zqv>^1?l}c2EQO(iRUHcHN%+;N;q$ni1?2w#kTmH(v^8%2H7BEXk4^mmN~^cp+FXqL z*=LGrWs|&(uV0T^{*yS~eNVk#FXD*06$qwWVjpt!T%)WT_T78RMQ629dcCrTXQG}(6!M^{n%zR%(6W59|{|b7HRvFo*z(> zK94n-v2zr=!N1t4}oF0aVI@wuVjZR@OE~V~fRS)v_O){!jwL)d6aG52$;P!b@ z*|Yr}=qdb<$)PPwtE;)q@Cm!o&R<`-%O-;swVMndA>zwe(?daSIpr!jUqO_mb7owQt4|Mk}%JpYUiDUq2_QfO7M=>Z_H4#b#CQ zZ8?S3$p~DJ>3EV59&1ywZ*G}ySFiaL3qbe#bKMWG=T`oni&IkT6Kxq6-=8m{bPqv` zn#@doUVxk2zlZPsA0|wMi#=oK?M`{HSZl$`JcB&HvjuEt@pr?K_|$k3^FJj@y9iq| zJpxTtXti?+Ojn7b{$txOkF2KqAuo=lW9{{QPHGX|>vemS%)Vm#3{sdYY++`Ft zOms~JH07aV{+ruRo5OsiB-yez)iOOzQ#=(!vSTq`kA>z+!<*RJpYCkT{)Oi24vHPGUpgP!?WJoew{lcntNd9vdACSEKXY$T}MY115FXAZv(1M`-h9ZgfR)zGPAw=aufn;*hmgSSM 
z%};@?OnsCYEP2#1K|xIB4FL43!$h+0c86#@~G<7^_9|a^X0FfQ?w{{>OAE{IU0Q_U*Ig$9~=yiPM{=%rfz=jJu~DVLl)?n_f| zBZbXd#9rA^R(q`W{{ZLl-S$6P;afk4&&dbR&-@;b$Ho@?ZkNZ-`47*Q{O0C=+n=X% z`ZFcG>!wvMotP|^gwG}mrKwKMOIB5Th_a7j2i87W@&12bmqYZQ7vst0(!M2j$V1Oc zDdo-k%huY{=Mq128fTa5?0*ol1%Fh75_j>>%zr4glztod-{W6NOXV=W=h(72p)0Dt znklB9TPC6IW0BBI;M5&R0#{byY}La=G4ke%?)kZbfSMtb7fm($bSMp)PbAoPX_{~v zm-w3Wah;FKU-Ul#$Ae1#IB$@{MjLc z@#NYci*{T*`>I;}Z0)kpV>!I#p=|qd@_OqK6W59;>5U;5paOwwZkwinHDAqZ>*x~2 z_0)ycZv+{{Tmlet#RexUF_)&&SUB{JYRQpZ!h3`TiT9uk)Yd=i~k}{&(Z} z{QQ6QKh6&S0D<}4AB`gU56Aib=6>9)`#JIQrCmz(zU@wYd)t$mRG{DH8TQZoOu*V# zOA9qh4fSSk^CNfv08QV=3X!yMjY;db#tFEyf!1Rm(a_d(eydQ6M9_3 z7emnFenUZMiO)%W0s0mwM3(glSj~5nyusp3SDgN`&+keNBGO09jiyA{CbGkhChNTD z@|8$r@( z^yui~-jk?7to4#a=VAW<%Jz>Fm-W}^U(-y|&WYopj&GzNXZs?kAq0!Wn=I3)SoNR< zPS*RdSN)CrlbdSuqt1tg`43+)zIwfX8=gHleGmB@&uuG8MN+Vyx>xcG9QL>LzT19E zW?6|~=ZVuA{#8bKk&Y1fBR)Xn-yI($8_+Fw0QT8}QLms5H_g8|Sa#@did(wH>x>#m zDp%|IM)?oG;?5*-P_Ny3b64GoJ73YOr(YEj%s)zn=)Qsf0Am~P@WwCK_5T3K zw0S?9UcO6|E{wu&woZ46kBV5$nmoJ69_^@Qg|nL|oMNP0Z6f^x3O}PBJZQ=@g|Nb~^xcwW};)Fn?m`hTV`nalZLB5|Oa*w-%9xRmWgZl~_BpLyF^V6k-A1G&@HZ{LoK}4Iu0Bs2>?8`tQOR5h`NQ&| zl{)!!!NqnV%4+tnutYPYx(e;{{=0iN_j&D?II4Ei_j_Xfu93Cnh0Nokt)%)Cn?f%y ziyJbk=~sytwz z(>o2i(rSNIbuBKVKLz|>t8Ba`Eccq~7~f8OeW@}_6bv}$rI-yxD#l8jbkEuV=uavT zgOwz}3~sruu8LaX)(v#H+WM#g6p1mFQK%eRvXjwjGoNy2S`%++GF{aLd+3M#SM2Rf z-}veA@qHRC_F#Qg`ek#-34AVs`$pbl<(sS0RsR6B!^ia~C7LtRVdX;}l194KXH!+E zcD;-H>i}OH@rnNcBK&WS%583YX`D#(?RQ~`VG^tY;v&-M! zdh6-*86(Xl<@LF{SpqXXdi+&`fW)Ecg)y}q-mav}CG^(~$deS00>tD6|uQGFe z5-~n0YgER4T-53AAq((K_dG>y6tBcKrD8ppyc2rcpg%3oKkak8*_7zf{3_3X>Scc4 z^#XhDi|%x*RCD1J{{UT#*+ycs+7-FJx&xhbYo%G@bSul*vUNK2^cuGPgCuzK=2njR z=cLd|>9wQ$)A2_`)L05hN}1HG{Zh3DsmX3Q^E^^Bf#I{xoKqwtY>KaaU{^8RQ zZ7pc2jB4vL0FHMSe^m13BmqD1j#H@d!q<@JKRh?;p$cIC0D{IP-c8bP&gj3)pYep! 
z@bMrY<)y0!{z1t904w}2`}%VHF(MqcES+!Cc91Na%pEHpl2Ng$)?~d2pDN+13!*Pz zDa&QPvYh@%7OFwI?~tAH7uv)wk3GpA<;+3xk2&_j=1Gd1hVm)z| zDIZ<~T(7&L&=!A0$49r_FjO4%^DR$By${Ay%CV+$TNK3wd7t}wf7*0*SyYA5qon3~ z&DT0mT;_I9FH^D8n9hr{+=5#hT*DwMQSV4#Fs0~OVa&rE*Xfex6manO zIZqdF(Xx7m6P@!%&|1Tp?dRix4?Py+7(kY77&=~mmVP%?0yFU|C|=l@$}^zMU2ZwDQ1SV#mkHb&rbk-{5EDiMwT0llHj^55Lb?(J~v*>C`aF z(xZ9UhkSB@uHW`|6xM$aR(+)p>`PoQ(~tIBT1~w*;VeZiffGL^Lo~hH?S9gfIb-X` zhOKSU3WJ0Jx{og11oT(X`L6aTN>J{{Y`svEV)$!};YpXrzjF`PfcX_^tecoVKcoG>67F4g4cifE5l{Rjo|2z)1H{|UUyt@_ z0qTU=>_3k_w|Q*J;8!E4W~|UNVaF3%?t9msLGEadhp6+!{mF27t%1AFUn>pWVx=P= zj$r{kr}2ctVP<@9#``??)<9ZLF$JCSqe@qRl^Y-I8U@ycUk9ZaG_WFpk_b`sio^w==dRN*}$!bj_) z`09TYBcrR%6}M<~>g5U3LjrqY0oL^{x078VLXhP{sX1|}fAzMx#2R>A{B@9Yxl}Ci zu~8ezLGi8g5+`PVaVK?RVey7V<(PdIEl2#msHubJiyY6EZU^|yacKvJFtZC6qDS!U zF@{KHLn@&v^%!d?8Rufh)^!uXA(lFz`1o^4D$sx0!}1@scTcM!Qs=lG(6HG~P}Z61 zd_Y8iz5D+F*!=h4zlfOnQ}SPv^lXj;lw=-1Blkt&a?#W2&r#D|UjG1+^zu}dS6Tl6 zWm8FwM?21@h<>aduPXF8HdV1ILF#*Hg4AC`{qv0fjf+sw(?`2PTYirveXp6qmsu2ANORZ@EQ zQp`=Y;?StI+PQ^lU#&{@rUZFd${9r0P#6QTNkg0IQ)ysP=$8rvP)cR)_XujLIY53x z_3h%iORbWQd(|s;mLOtxIZ)7FPsvw$l{K8}u}q?2G<#W1Yie?UwlDgDIokY=9~Sg- zb&>J2nU?%lpvq6izJOR5Sa|Q+@HeNwbb&EHizHn>bw6O%u=TdOJ?omDzY9P&{u7Ro9r5@E1CU|zjQw^8f?f8&!myTp~xVi7e%eEZQM z3?N%Fx&-83lAm9Y0R6{Pv7o?}QP1tkzyd|6z!=IyADUbK35ru(uY)mLXv}Lri8smX zH?ynIw@#R+{FAQv=SC^jKHIE&FU5=h0FSCJwX9W7abQBFo+nPhLMiPF>rZc)&6?Xj znqu%r)nARSlivHAkI4>=Br_H+YJfKuRCwtQZ>BVZ@J>`01ig7I|Xu zaK>=;2%+lvCtRN;_PEV+oAM^eJUH~2o0l`wof5*r2_}}^X(fHs000930&;J022mb)6`Wk}S zB9a8Jj&#y%#SL(cT}KkVg^D&~Z&W&;9Wk~)4cXhXH+fu7#$$Z^$f+$FE-z~?>m5G# zbe5o?rHj_4)iT!+PckO5z|r|&%{SqFPsr0pSwDvMc)8s6lSv1~=v3&sC>c;*8n4`s zj!atBmRA}=0;4`wf2b`=*4TX_{!Szmm;pKf+!-gE)SyEFk+n-k?vMbFm!mX4S2&DS z8e8bd;z+F<_5JwGA1{O5Bk?o%Z?F6a`~dq`y_R1|M=v1J-O^zB84o>$q@RF4>nAC$ zQpiZR-K+C2U(RDVK7Vgcc@!NaU%ShIViY)jvLJxQ-+1yfL`EBKl!rNU%?CHx1=Yv03Ux0}noo^9iA)^JHnmSJJ? 
zftq6~u=RccLswFL$_)AR&T9O2Z^b_q-^qGh>AJUBCoaa(&gIkFM`=#`T^cbUu8KNW z9n&EFm15ZMt6t%%>w&+V@NfKEt#mTNpU28FH&;@aP)_b-!z)3 zADm-#&3<{kt9UKk~(o%$6Y+ zds#x!ll+D=^}OH5VPg6+($`Z)Ra8^+UXyBoeGweu+UNBNktgbc=w5(p1Q@-Rj@RWk z42QZ?O^|VRuU`3P=u-Yu?pjQD*0(yg)?OZeKOQY*j{I8YA0mGlx^#6IsyZDT+Ur~-#)~0mldXkC>yQ*1$Ffxh;bu&ee^_Q?j+lIDIs=G(^e;wnI$3Rur69gzZ z{(zQf7E54~+g$5CBg9Pl$(nTUv4}Z(XzcjH=YHK~Jeb)Cc(4|^+wEe4x!0uS6835g zY5Q1S_AXKkqlmtxRP#D4+srB_+nGzB2)T#m^7y5mun*j<^3^?K( z^|;j4(`P^Cp0&&_e=tyq*Y-1?%IgSpqzYB#l8L?@a-p25`+U@XLBB$0(awvWo|r{* zRXWOfv&&GQPq`Qqwlsa8Yf6(QZ>0|W%aa7ways9-Do%QBp1H5#TlS+nlV3vRPbpng z@&4Z_bdNijbL~L1np7#79llkk`8|BbR}v!hbXGE|HtlRp=&0l$a#)5FNdEw;Z(_B$ zR4d;fOQmE@=eHWdkDhh^08{4lj%}XV5(dE!0Yh+)`^J~6>+2VT%92UFwN=(!*o6<+)e>} zfh*5{!h94+&yktj-e$UkXgQvOjsCn={jcnp=kso-%h3*{YX|I-QMeHFsUe8|#n!XXxkG3#ljP6CTlp?rvIk0p*z!KFNE<~B zvD!X@_C1x5Fm;V)s8{DlhOG*VqRv{R{Doa&=zXP7&57u0TDm8i(l6kbSCj%l9mno4(1o+3D0o{huIR(Bq{Av zGwdhrE=LS5KVH-+v+7YokpBQ!(PrmJ@cnOZUE<1E1YWt<<5}+J`pZqMS(k>Nt12PI ze4F8KqkhQ_N<>s6)RADf5vyLbzDO|Ezi9?%u~^nO(4&``N8^?BBK)Fy6V8UkrIoIk z>Izva)v5N*-i9OV&e}XaMv|>2iO8sJKaKWL`*|a-H0M64>pGLuOXXjR{-pk*RPInx zqNbk#`k6$ebh&A%miAUM#3@BIpS^FJD!QSlDIZWZktVOXno&}uj?#Fl77KnXJM_Us zE}QX~4!W*5zxO;)&Qx@MvXc5Bef;i!rn&|5OqvPm^+I`|gQVzDIi{bn&}CMA+;b0D z${R~6Gy0oD4z^WV2_|jUD13aMi!Z|RcMl(>$5b58$)-+|dXQf%T>k)}%;oe+O?87c z3XVSQbK-RI>ALB5g!F=_=M}o#>Hl(I$H_ z4SK^Vf|G&m}*k!vdvPp1OEUDV<@QoDN;wt#KA~z{I5Sl{{W`2HyPjj#r(i%tIKzp z#9vJ{KG$yl0AI~z4#6r|VWhTKPX7Q2yj{vyAD?AELzhn%->6|!K(O)F`-*YMl;EJqc+4mCqqJF6XdK7@e2lRH<+K8Q=K9_Je_$HOC>`|3aUefd^X~P%V zo~a+u&R|Q#p=w9oTlyT%X5Hk6jw5HYrsLnVs{G|%Dpt&dOtHK3dO8wpoats`p!)jP zv)^yjV#xl-p$Cy|RcgGbZ>w@fB1;?gA)cAHH4k2x=rdoL{QLt1^FNX32!`%>2BknN z@*m`A@z3htADto3@tA(y_|K2=rT+lo^9KCq{D0f{=B87wu0ncLv`_5}pvSf;rVS*L zuwMM;(9+O9Vx*;CBk@XEzx99Q>^_fgs`qpqtZTJXvc{c0q=%pNg!;`D=l!GO@ITZF z9Oid_8sPJTq@MRaTIWomAogUdSiens7=s+`t0r3A^QY)z@t+~T*VyFuoZb<0XHZ4P z=G~B`0NM&f5cO91s=A_41roRKCzD=DhtA|{aBEK+kzZP-EAkigHc_SM7k%z&s@x%^ zGtVbNHmankl;sToey1&t`vNjM@%~*dw4TxCTPkRr05aDuX2`wu55?3h{eQ>C# 
zX)TLZ?(zAT{BTQ#paNeti!rs1Q800B}8(`O^KpTXW6AzH^Oq&G#g%NNQDtBP>i zsu3lGfO)S3go8rb)7E-WlqB)S^YkbKaCMDth|wq5k<%70L(d0IWJ<;Rnfx@GlGeBy z#XV~Do7A8-7X0kqC%LR9Eu;9+{Gr>Vkd5yxGo0GE=K5%1R>*HMnR*)cC7JF_>s4 zeL6$_R>U=b<$k@{LG)nlZzM;b4`8*JY61Y%tjOt6wqdOc5@h=HwW29%o4qMI;>-Df zZtdfvixHG_!hLY9aGK4A5-3g?XU| z;=i&JybQnU%VJ{SHBEmGJ=Qzz@vrI9UMSl*KX8M22 zmdAW&ORi}NWR+8lB3{}iwe@eX`N~=l4=bzQY+nh{pqnguD_X>w9j^D}9KN?UYh564 z8QX=?edzfqwMj~ViO9-B##R*JN59qZ?ZEM=pQE?2e7v~jpW6DZZ>>sKsM;7z-SPPm zJP%U6nXTViFwA^2^54nlgx=bEeOpJ}sMk;Xr&sMw_O-tPg)g5&eT^~^)=tHanpa=P zOQQB~I!;tBuVk~vW_lyn{7x%5-LBTqbI(6Nbeh|CbgHGPgsl97^6t-9p$BL33VIb{ zx%1_W1 zSWK~@l-fB_XUo5XQXYC=z^-=tRX^4HCYe&0XQ4c7m=JpfGk9zLmF0`Lo z6|3&bi(~heV(;hs1-}bj(~i@K7K-0Va#QclP0wcb({qFDH=QG=G`@WC^h3&r-HSm+ z1d%1NzF`%wm1@SHm#{He@AXeId{;3olo?ew z{{V3l@IU(Z0dv%#{HKRKAZwjd4sFP5Qq#;``|ZVLI!Re5{{RJx@!jTrMcGgN zH#(`Q_HsG!lpe3$QAdy7_Q?ML9Ogh@%&Hg$j-KZHjzBJ>wa@2drl5a<`T{#iV+tKv z5Z?kbPdx^;HBl2@imPYUftk~{r!;Vp>iv$Qb*D>yb3+_W^fr%`=&4F$T*Coe>o$0a zhTZsv_Y>Q`#(wM9ss$B1ebYLYR@cfo%`~9safgw(zt}5^wPSUZG z?rj@SGO3wr*p_iYp3B9 ze)vFs%+)reKjltb;k2#0D9(6o{bxAjQwO4|`3{rRW}rjAvaew0%gsKev)`CPUZP_> z$1t;Q;B_fveLjCuv?A>akN517kJ(XrP9-$9qfGNoau5S1Uao18^nqWXmx24Q)};tH8}v0m+8bV%N;+ zhfdbNo;;)4>96d4DG=Y9IY`BT{fXf6dl%?0VWqSxz_Xy%J?b0MDdKavQSsK8{elo-&YA$&K(fYcp{w9~9vN7CC|A z>}~4;O4= z6RG=`$X`{Vs&A0*)qdvu3loh|!M)v=h$-sL-JQKozp}hyBq%#5nRqsRi7JA|k3Zmt z6Jt+xNdEv)Pj%`|$cb{r&Z-J}2V8Y2KYpTf<66dMJniTb!bcxPQz2MCU=n{>J*g&H zOs^u?Om^;zxfm-<2H)c@cxCCj(X!~n|(i7noI_s(mn&3)T zGz!hdCPkR5SD)j;RVmMR2gt;m?XSnk8sWUZ{$3=8_!=@b2z*!@Y%eRr+vBt?PTnsK$VA!9Vi;s@2(;C6ZW%!zsH@H zepSy=$?4|PLF`e>i_qGuT(kcGIm2ioNfl`}GN%6khGN*6>Zt3je~>1gjr{mfv}L-P zN+~BJB`r|q&K`j17P|saG%b&~>5+@J^Zx+Z+cV+du3EN^oa5*%y{luX1-8x|#$3k( zuJ-gK243H)t(Ipe=RW0AI1|^Me7>fHwl1~q5j>=&Kir&EqJW^jpgjg{Iv4K!cmDty z{FI^R@gt>SWil^6d34I@qM4(d(1-FO*|CVir7^TD9Qx{3B=xITLK^ov6Vc7*IHY&( z0r_L@d~4}o&ziQ`KP5$NeBG5b{W=55QP!r`F5z;x#Nn^G*WstMVzb6M(>?D0jz$Po}r`wNn zcU- z^hSMN*IJ$4iY2qD(7u{p#w{N+y{e3Lv>lIA7tciBbtWHY=>wVTPOZO-t0&$4(L7?K 
z`s)a=sJRYfWBt-b(qJd{7!87mMo)COb+I3{uC{gPq2R;;&PY8j4zF7YV#Pnv5fiu0AURQrEM>t7XY9C03W1%&&Q$t%1ZMi{E8QkCdYg6 zVBo8m%9(oQ`k;9#*-OGE6paPP^^-KzGa4t$pnr=zez{Lu)V-k}N}OA(&ub6*qWcU_ zu+4sRtKa%e}29vQA zrhhr`YE*=$9qz|e-#`m#+5Kc0cZ&~|V{)DaG@4v$@%2@_+drN!DvZ8Wk0M&_N5$je z;aX#Rk~CE`j{g8%fB0x-t3Wkgze%iEwU(Ht{N|o~`F~&V{FdyJKeT_hSkFYr_e{g! za}J$gA&=0uim|p)gY2n~x7$aP3CFGAlU23;Q?Y$){$70eqPA-5xRPEVe(XTSi!T!@ z^<*dEC0Y8Hr4QphB`J9f^Ey~$B{p%KQuZm`4zz^oyA=qXmI_bWN4)6+6=6=+-_YUn z1!sv(KMB zGB4{h(PzN?WeSwoj$zUniJv$A)o8MOV@t{O)B}oXlL&(wqJ9Iq1GRoauPLgM-+x1` z?J{paf-!V6rAUr>m)Htf4?}m=j;Hg4vy)&u{114)% zJzi6%@AMI`Z26Tt(U>Gu0aXww{eWdyxnx+fX=UQc>c%!};e-;ggE`;>i+jV8?M*mKv_bsj}c2{%jO(S0kF zB~{Fy;=ZdA%+aZtCM%OO%{4d`BmmdDZuc-g72qz?_0;c%9RBPNjVvDpfcPpx%uTm^ zx~Zle1muovk-e0ZrVK{dn%qSHS~EuNQWPa8Klx-I5(QO}~J{{To`Dl}kP?OM&r zBBRzHU^SE@Jh#?mIeQ@d6aJo)n_o~B&91)(P-VriKW(-0$)KrzbN#=s&`np!<#?r2 z71{cAHwD~|+n%6?r-3W?3>GLG8{}}ddVZX#aSMqc*vl8;X)2i$S%+O{oPUp^_56fS z@cH-#C(VuKpU4QW7-u^_v)1*Wf?3F@W}N6ly1kNihOif$v??g1Fj0e8cK3Vyk6-qA zY0dHCe!}0<`!(Gd2@GR8O9_&b%(49e_Tsjh8@iRL%xS!` zc_W+Lnt!Ym4PFT>pQJ1B&ySok4A1BAd!P3g>!H$jB>0niI&5c4_G@FZv1a|nCWZvR z?*Ph>FgpFHolAS9a!rqO6;gjEKA(gW$_8+GC&2tM-BWKjiDy>D1+k5P+(2 zK>qN>n9YTUjFh@|Q%b(5iq3rd130Nv%`oVA?nVhEoUeFakcVyj$A_v+4xw_Hl8)1RqR;TC$i ze4oe@v2^p_(X1HcziDl3`)}Q%b~#LEww=fLvss#1jQ;>XAFjeb4?1zZ3OQYs@%geW z)68|UFe|;~s@1BiYFp|FeVgrlt)tZMu=eLqvD2kgc8~HIv8?G3=?<(CNqw$rtg^-~ z>p_#kc?=7pWfN>I%mi@1BazHM{oT{l$a#zyW9TXMcKMFr$VO~p?D{}b4S%Q1;&u{3g@E{6B zER~ytCSN1Di}-1gh>Af4@^~!PHKXWWi5a*&GY!8Ok@)lZVGozD<8XbOrMh49gYX_3 z{{YGD%IqmL%gX#4mm-r$V-8EY{ht2S)St+OYUnCXS`>n$qe-mIXH)VOzF)`l=jNG9oSttwer!Bh(o_#}ln!i@a`uM*0Ho~c#SO@vUQD8i zhx|G+hp|6DQP(O1_RS{;J>4j$x|Jr^OOwsf7by&Vb<*wM<25RG58?IO%WJ0eK_y)Z z9H{B~p3ziNw6yc#dj0Vgd1)HRhyFUe&RrIBIaMQh{g22f4VI6ocr`8OemN(LZlkYa zvC8`_o_yyc8uSrWg4qNhMJ{8Aye3lxe_4LB$v*q+Zm?cjtr-1g2yIa0Z#Pf~CP!sR8RkLzf=&4~vz){h!APdo{075fsBUg9-e z&IMb@W;#{{UmzDpsiZa`JyIiM-xVYNJ&$1u+p?_f5Ng~~%fG^J?=sUmlx;)4_ak`J*{?D=54iai7GQl9I)t19+&&6!5 
zr&gHmQzrqOu8-~({{To!&zvYy)-Ph8p`q$<_%HZ?s%QAo>;BaL03Br9lIE@DODcbk~M zy_nM`X^yqc@yx^V)Jqzd;ThNI8N}EO$G1qxK23~q;{d$NO`Eyr@!`1RI63{<@#V$* ze_ymRmc&!BJp4QTJynt$8YjTj`%VV&$OiMw5e>auT${*6_EKO%p zB_4*yMROfM*0As40>owN@~V`-kIs;2eG)C-pMvuB?l&>IK5hR1yx3M6n^Y8fyX<*0 zs*_uOvi|@d=Sx$l45mO~3x4oViBURlb;|)Ix^c)o=*;mqK%R~wr-EnuVwJ+I z(%gLUMb7qeqL1-NA(ozX&*voj-M+3W)z3?YHIkxcgDMHP#njiIVf{Xo%_X5-Iqwhp zcb)Hg*Uph&J?1dVcINl~#$<|GM?6b#^fLBTMBJNDa;k8ss z&TG@BS2+qM;y<;h=iJ>ON1yHg01LK585z0Xq$u=1Yfs}dPAp2fiIt~THh==?eOGk5 z2m*5Jsqwag^s<^cyzV4sqf%xIpG3EJh)=6nx$#^^Z9habFuZUpE#XX^W{xGIvQy4XUWX38PZYGqPdnbnWxSuXh_t<@zBafASkvkmOs_nMD_BeGW;;X{27*=Zo(R z0h$h7iK^98do5V+y{#3nmoa)rrcNCe_^Nc7g!q(hi!k@in&LW7b)=tch!tVpk2O)c{BTy#-~B!aIJW*d`Zn}O*_{Flnn5+T%&701q7 zzAl_T(=AX@d(!EoN?bg0VaguQZ%5+&Psq@+kiR+GjbwVNB!p)2SpD z^taxI^HGas6w0CKD9!O56!$ybPX5kKYlXrb{{ScFxu^SuuWgoAN9!PSJ?U~C>=xRC znZl{p&ze{wOOiV(xyEh956*kZiEHhk+yc*&J0^Jw%E_#%osASt_Lb{wlOqyd@pC>F z1g{Zrn+kJ5C4$_4th97BMXS%QkI#Q1=yI3aPfxBB#BXb*qt!hbCmI zVSR@ARQGEV=13e^(ffZ8-T3{z&g&-%EtZ)SSy3&4=grGMYVpHIJE=-imwivC@I3zjwEaOkZR!3~S2vy1 z{DYxSQtqa=3L5$q*8YwwEy2+$l)cdy3w_0tHIuFDS-)6vQzLm(N(o!1H9Ez#N65)v zCi2Fyfw;lNn~ZjJ>0#%Hdsap|Obn^a z!T9yJe6v0Wese<$=XXQI2(Y*R0IRw=@uU73>fPVsJ|l&mIiDNyS)jia4~}Shz;nkw zOt2OMpBwS=dasXz_V1h(G(meu+a-Gg z#YXhXDSkyaXNweR^7BM?LbF7L=>0ByFG|TdpvO1? 
zU_)9$R;4JZJMt>9L5w~MzwBq%&0^8+5-%bT<@}YN zKczbit0Mv|G1jFUzIYhK7o{|_d_kH{B>A+>#z3zU`Tqc{{{S1N-ZyudlXJRDF*vGgDS-{-?G8iC&|k{QIl=NvA%p$mNkV+%J%WDqORd zVs~52@6s6`4kmN!ttk9y1!qH}^=WZGr0@EIZu7S5xyWTLN80&%dGb`Qe7$?Rlj{HSzx6Tzs_hX;M+Bc|0YZR7uqGxIVwWZ61&K z@g^` zM?>;H%9qq-oofDhTqtRxXc&7MjUT-9MqHLCgcuxiHql!XVrO~y@%dWrZw*%A(MCha z8MShf$kxW^X5RgI^5QZEg-92DSNjo%=ehYbpON*uKaeG9=FPu|6+@YV4HLC$eO~m|cZN_kUKqs2|5zfYx8qjY1eWi!I$&c3> zL4h~xrN{ifm?hv@BWI(dN3Yo@Ny%cOs!@LF(bh|Jx)BV?C$@zLh`$Kas_lMG?dL5J zpA;_ix5}e;Q<)6+l9i;{*B{@VcOLrjU<+#fq`}1W$MBpu`G#pI2i$ckaorK=%e67= zdGoAjgTB2@X8LU$+#vy9>(<=}=#3Fl*&5AzEY?dW#-^fZ;DNS82}rJSX(+I~$(0EH zx>EHej-GMWO=R@D*iB5`Hq-3)HDJ}d*EYA9pCsA1nRNt~Xe~|re)$t@CcLMVMhhI3 zRCJp{oP{tc0Cawf(#=>eX00rGLErgU{=WV<)$FI6(^nk(Jn3cBFXicp2kc{#??$Sb zK-uCkA@l(2#%83LUxOkREt2! z09QsB82*#aoCVP-m#Gvh*uOvLzA_qo!DwirpKyJ`NiQtxb6CmIy&jg)S}X=6H|-fl z1pfdX=s1=A0}=h@;fl>48ioFj?zGoMCb5d;a_U`yA2LrRp%KcKEpioAgz94mGv`a8u{*CU}@L$1oWOFY&2lOq__v?xkm@ zrfr>NzG`1(`dhQci97XGQ=as(qdd+QZ>TjNf_@;7zk0Oi?^jshMB&Pdqa7vXnh)D| zu=O0x%}2fb(5u@!{@WWBO`-*&K2d^cZR^Bl`PD@Da%YdZiUXa%Pg*s9!2S>a04RQ~ z{Ew~E>CSx4cSZ9-6HUC>Isr4!TD~))KYMjR2&55_+S1l6YLRU6Tl&48aDdG*v}SC2 z(P3xs-BKJL8VX_g9{75e9~%7>c`!y5Q+!=j6PrSl{(bBZYU3oUe4?-a05FaSiC7*q zH+~dfWO*)cLwWS`Q2QjOJ~xY$^b<0=o+3~idW z&idBL>xCw4`5JwxQp?FJ>&R)l|Q%rY&yTJlOlWu3rb6=vo^;izOx&f_%TMQb(+7 zuoV9QtgO_weE$HsyW||V9&-1~J*uNNIw;?2z!2SF<5Vhd$q6 zq@XC#a8L2)@AcKWH?67=Xi~;Dag-+b@Drs?`qCzKqMxuppET9?$YfvGkZ2UnTZ18e zYJR!pVNgX=mpzE<@;O#d4_~9k5$Bcb`)X^5BthMOBR7Xw|yiHC^>~pOw9VDoV!1x1J>#{R3;~i-13*L#d!Y!)OpeProUUC z8u@hp0J+l#$m)>poUWo-g>o|#gfyZ#l8lsUL%$>JP<7J}TO*Sx%t-PS-gS`YBPXBh z&!W3$#$yISW&Hb*(R)6_8SDIx8djT&pV^!0sm^=a1Jz{TT&Anxe`1-)dMmc2;eZF5l8~n55cmDvi_2B-$ zAV)G^-c1F{*}@!cj~b)k^VUpP)%IXE4x7v!h43n-aa|ja!ube(tku;bIrG)bVue(5 zoUKwBJ3!dooFi|T8jNUS@=a3H^4}lVao?14?p)o<^gne}^gb=T-Y*?|@kCVfvCYO3 z+e~vv8tIbU1#3S~r;%?vt3P&FdQGAi&r%ZdjQJF4Y@YE3>f%eC{{WEO>a@R~pYr8* zY0AHATy)3z3}~*I`SgC&`mEsedI3J;te=$fxHYL;X$+=+09>lDzg@4#dOwfEzsHJp 
zblSIbkJl>U&)oVOw52a(D-0oGaZHZ2dUa{fkqj2(me7yFkEcoUq^53byAPRF5}>Nj zLXG~(kPFRc*kdL0phfFll(CQz^twnzJMQG#$D)=*`Ftm0D^NzXsVQBNB{b}>i8C$8 zV|deylfCgt?`L3`Em zp0&W@=lYMz=YSB#pu}Abw)?ET`T70PBzL06m?>>s}kDW|BREIHq`efJJP~{VSbjMnyz3 zcs#`fYHE{}wE*@vQWH@-DQWzvgly*Gn6YX;LsugfkK(=0U&n{Wf5(XceD1HqJU$4N zIq-faYDt66VB7$>Lp z`rmw!`(H<&_B}k==`EmzI?seH>#sQ1@uO=I(6=ZaoCM$7>`mTmor!uqy8WHgF@!$n zrIn#U=d1O8yVQP0sZ}9!`@>_@TkT&ePJKzov~`U#ugRScJe3t}&8X6Oo}~MEa{gJ; zAM};vbRRE zN`B9={!3iuLuHOGV!i|Mo`CF~w*E`e**>dJKB!mfWodJS0U3m}PW&kHs|0jCjOmKs z`*i;Ra>q762AQ&oaJK5(zZv`!{sJ`;!xKB*pvo@RQ>BoZ5}`4a}ePcJs;K-bXb2T5se2l z>rk@Cftsc+e|Ndm?7#9!NkuP#bDX)dRtfvZ@#@p`T`iRA-C#>R$6}{sdfa04zg*wP z5GrhaxQo$rqxGbE7SUB(wu4eht6;F{>6B>)#mSSw=;0n_;R9eD~HbPRcAx@CK1*9 zhp4ENi9&FHcjy{<^$s{Q_U)Fvo+}w?o(EftiuvB(0*+>RX6wWe6QmuRJO~$1{f=jY zcSn2_I3`EB%;!^yM1zr0#F)$Nnfx21Akl}&ABgs?mVpN z@Vc#?$~r${HZ4%HrKjf*h}h4ofZ$uaZJEB2nG;(2X}I>3${^KzkO##cTjROAp9kyD zo&H08x>@+I@1r1N!B;@%Y^ z&qz4>6-%MC;11J!-@|%2-9(=i&Y*#J>`+ssMmKKW`16Jxqsar*WtyJ?B zn$~*;pIfm_51hsOCLyDm-nUhKIxnU8N~KaS=mc1Cn)0pW49QJ!__>)(&gloNYCjM` zmgiZLx17JWJ3z~>Kj67JOPy4Oh4z(0j4~6Jc>`<4lYLK{oxz7Zo!CGVwtrF--dyAf zIw|$;yAIBWtqYIX2_-k8ty7dGJD2j^x5?-~>pJ{x7 z^`MK(*+BaNGrNKo%Q4FHXVXRv_p-bq2ZA~Iz*0zjID;2Su!Y(}Xv)jzkMst^Lhk&GHN&YBREmXxV^pVK@b zcZdqg1=PRPqcE$fb-8EB&OvZXL9_giQndTfhV2X%dd|GC)W1L{z?w?@UCjy>FPw{L z(lc06t9mB8Ff;jKv0A*Lfts-u7PQhsv~1GSuK>na{{R}Xe`X=}Yuw4vNHb<k5*4N7$Zo`EUK4o@;^BkuGEt$$|HuEm$X z>t|1-nb$|T9=j@g33Rk3V~D*4g=ac%)f`BA9PMqHkl$+M8n>7Gd#fimhPG_$`>qD+ zt1hHA&yAi>;kh@RJ2nFvp%tszRI3Zkbb#Pdvg)_4?YiW8Gre< z`%J#7$iK&m>LCGMpU=W>MzQ|@&{TfCp26`I{DA)eommTa{x^U7?Jx13BjN-(@=`QC(W3Z;-a;=Q7FDIK1LRp5`afadFzl=G6oYwcu}8ma~kiajbi{{ZTE z?vcH2^1+njDfX}EAGy-<)TZ57d47 zQ}o{sHD@IoR!{P}3&Fs){a@{oD|J1>Qdp`q*Alw_04Zjy`u9i6HXXDV*CL%1wuh>E zeAWinZU}CFoR0Jxba_X)ObAFEBn!@qfhRO;b}95dHp>90O&4y_sdNEQ+hK$u$X#FVKHUxJ)R9PbzU<&t^hUis1<#uX)QwV%blKnwM9^=K})To(A`o!G=PKN*V8e9NAn z44Ph#D(;kPU(jEh*L}ayI$W97T{Ld=Ch_@Jr#vkAR(~l?JMbT1kH;wZ#dvD{J1~%8 
z?2K!DHuVkBa}q!;I(gkFo1Gu%-vo;lkAu?sbvq*;_Cx&Q(^mucsTMOiT(FwCH_Znp zIf>{00G#!S~=)u>oBWsFy=-J8DJW%Wpf&cX9S zsTxN&AETRRd#6m$b2;GV*+amAG7rXl`LuoKq~9S*!9e?Ip^KJuvbPEu*!-Hs9jmU_ zj~sgIK{9SoG>4NY$~pDt6wKUlN+Qks&8mtWbyQVl11cxPhNytE<Zqxzlneg= z+A4n=v+dIUzv59^T<18_z6jMu&6ZJobI60e>5wKPE_0=DIV0|@KeZ=1>gXz}%7ark zpUfo5gezln`snF&GYNj90$yxIQFD@yC{{YV<+d7497_T>a%$Vp8 znAEFQEdjQCYShPo=|OTdlS!DK7&qasn%5^bldxWKB+T*DChN0bMfA67J*yygN;yl?2Jl&SaKsG*D;f2=p0#CHhh7%lZY@tT{`hnS=CPph*5di-H+9mqm$LsnuEd!I= z&-AIB`{ueR(`r1G=B1~hQ{6)e?#mj&^Z4)(@$K^YZu-#A`8P>u9){r92nGt0FZbZ? zn5hDy#(*~rPuQ?-v-LOQ99QRVj+PSo-jX)Vm`g-oC^~WhY4q?tKBckET#lzYE>E^} z{=B}4qY0V*$t6Jl0A?}3Ovl&qX?mEicrrI3hv*_u(7ex?dk;cvzn+Tl;s&{suKoJo zKt<-60d-Dz`YZK;dYnt1gKMSrCXdE{5ss;@=SY-wI_~GOQb3N9Vxee2{44BOKYO0$ z+WYsPJFH4!p7*~(9(adM+|S$^9R4}!B%I$!kM36%dQ@~P-1v`i^EzqX1>o%SV@^B! zw0`3C08FOeLpE-7z*So-_OK#j0>q!PSN{NWB*c~}Z`}{OJ~BnwOFC+d{=yc`#(I5)s_>-oKF7E`aMEmX z=kxOZe;@!Ys8!J`sg>CsI;M(R2*3hPv4fg0kV>XU>tl95c411>A#X}!wwvFRbNU>> z2{f3f`XWmnKqW(>ezt=SeGCtkf{DkkZH~^8x_3Fz%{IRSZG02Sa&SkfS#$D|m)SYd z8=L1osWD*fU>mK)bb0>(k;hB;v?biPa^p;|7rPVWD~6TT^+7_2NbiSFkY+?XtMk$kh z4FFKf)&-;($gB0A5r2*qy!HM><)}X(yPW)onWFkV@_Dvn=Dzj)D$}OgexfSL^9HSN z!kyJrke5PeswUS@)jIk~a{R1?kE2&VpG`~IRLXbjp^d`a^_>9wn2fF(ZR*=hc=}D$ z!#=*2%@Me~7{)LtZb4tSvRbhksyoL)cVes}o@_~?r~ymx#Uuf;gFce@QS)=(VF zDvOw%Ez_!y2F*n~iW2lL45#JczVc6wt+sW$e*C{^MAjnM$t;Dk%EV;sRswbu6$og8 z%h)8;(7kWFD*^qd;WU0bfsP-yLk7-MdNoiUxCK-%D4)m|Yi10lw+_3dH;NT|vHcIU zw@!JUsi*Y6q0zF8qMehUC(4-Pw@JSj!`C9z%-Ngcu8ggxsoHBJ=lvo6f4S50DiHbpMd|bZ044I`^Vyad z#<$1y(_cgs&g&|fy#_4&@MNNOvQ?$u<92W%wtq3-TdbbFUJ}>y`rnNI03=2Hi+}rH zoF9+#x&Hun_}}|K`T6-T^ZNe)K0iOlF0mmv~TaZM*csacpvGsHDxFk=zq+i$Jth{{RBNAML~W zAM)0EhZXbuf5-Cw0D}6F&FFV88ktF%@Gb|7}1t#G8I^3JB+-)6WVN+tEz07>?FP16JBsYF7b8}A~*L%I@ zUV`}l0PLK<{YB!hMcTwc*G^`+28Kw<5d6Z@$h}HT@aE6pvIft-D`+?EL&NZG$)U53 zHilB&Q{2xTYm|?Y$>f_Ud-P8P6*-n+E9AdjDt{mRYh>y1f*Tjx*49*b{_o14rS2kn zqkp%XMRb&kn-s4`nh>haVm>!JoAxXYr@LKe`6XyAl)dvyi!jmt?98iZ%?zY|HA+XU 
zXqhp#XVpUlR$glb!KqfSS^=#7y2J=G+G3Q>Cc#)Q?5o$!{BfL~zgG5g;(LE;o+nW< zc8;V-T|?98z%#j1bHQH^c>e&O>kmHz{dJ3@_{Zi@Gp*C2kFQ7V(QR0;IisERPuP0l zjbe5)?m;g~fL7}>7&$+$mBs9#SKr#7rRB}_ja*(L4c1A`-pdJYb+?%e%f>NZTydFb zr<&~fS(xk%>eLl0p+GbJazzX9U+qs+@13YI)VX$e{MCo{ymUM!MZbRKKBkH5&NSxYBmUJ9kXqn&cy0I162JH$b9aUMSCBCfFceUH#*cWuJ2eVky z^vSVN1NL4gJ@C=b$*bv6PAX@6RP!C0W70Q)?BnDi0<~*8oG;3_N#%4d<~9?#&Ic7+t2rBN$24=s(D3jS>we$cVkYypm(F@! zTC8MrE^SMB@V`Zjw5VeNbR0|#`)ZU`jnQBJq6XGz^5+B2798hn{8z?byljJeLKaxcv z&k9JXvwnv=pEE14&o?k;om*43bddQqz#iOEGvSSKD+l>*V?QLO3vwB|bgOL*l3P(K z1!|5Ml6HfMj-Dx!v{GuJ+DY3QKK2t0D|Ly^=P(w&8;Nnilm1G_z|92AnU-K4+4p>A zoxsL=#2rx^xZjb+QQQEsN+_7o<|1xJZ-8#8qGSi_Y8p~i;KD-*Uk9d{YV{|}6F8HU z#*Jcre_BOU&z(?6L>|Lf(e(bF{bsgvV<$ntt7_|RakF&Bc)4=@Us2M;ikSZZKVurH zP?iFcS~|y{rEyTb^)?f^Gi*T5&WnY*6#gHOQmM{>Pim1B+9o0L6*BpPfhj62VgPCDDDh%#4su_B zz^hOj>NI_WV?Tyz2eCoA9MJl@VC7Htnax(y=i}`n`rTHwdXErUx5>Hl^YP`HMIU#& zKP+Xkz{GS=ueHic0!@+*c5AU!AjWlwhu-*GbP3936ni%v)pzdMUGLgx_fn6l{MvJV zPH^}yj5da-=(huB`J?=N{{V;dI;)GR_|IBvSJR)H@8}faCJB7jg(d#u(8aw)FSiFh z9s}(;^!ZUN{miGB9`!rgr^(RddS-OM#2&ZKM#MmBG`ar(W7CNZ1+O#l-VZp2UdM4{ zkfOF?qf9a~eB58~YqNKH0wEG7AL~&uZcPL7zK2`#AN@yPGqWYH>Labp=khsRu6yYF z-(gkqU(f7c+ftEzi}X_6x|9#ELue13<@NggM08uzhDRT1pJn@Yv;#kJ1r)sMB^K-V z=*nX%<$#d3PAs1c_F7{)9tKpYL;U841e;$xsTpRnQ|l2bTZSsKVPcpGH&2?7oVfKa z7s=e&;B<{ui+jFnktg;2TPtTuiAivZ>4kv4eKqTWUcxTj+&i?mTxWl-8Z;}f&(6PD za#vhk19mMHr)ruOpc3D+5mP}qR?T96Q&-nYhDlykO07_Xv1U&G4y1p-$LGn<{=ie- zKOn5k`2Blb9H5Ek9|0MGAME*&3F#}rtuOmWM-SWc{{YXCev>i6WBBUZ6#V?C#a_t4 z6P3T(zlUP1t#%S{OpPGLp&lN*5w=bXq=&Ui$$(sC1wXfO4tizzXM#N;_6DASl;m&L zeFsCNEN+kM0mO)lTsULtbcS!T5CX=VN@pqzo_yr70v@XQREjF)%hRgeJFa?N~ALH2q~sGIs1uL46CKpQoNm?4u0Ta zH~UNYlF#KmN@tEQ$B1TbT~fAbu@w5(H_dzMM4-)jRqUy%^D|h98}rn2!NRI5x$xvw z^ZuhUnVWdd&0-ixCmt4&E=+u?cAK#rrx2F zf&QcQ$OF&*Khz`Yj&>u|m8ohqXXHPSLCK0SXa4|N=QEhkx1_&U)tw2G_YR#$<>e%P zZ2%Xud>1Ixr8`mUC zQ}o=e%2^24a9-G;OOzY!x1-RT7c!HNEK<&w;RnI)Sf%~6D@sA>)~EfPe&>MhpPmqz 
zJ5Njdi$q)GXM^YEnnYjY;$&*xe9n0t)&iBJO!&tX$?*m&n5ju-%A_-6Fbh=Gc>1YTuwD4dU+-1jM6pDTkDrWa4wK~R4X4klZP6@GxPG(Dgw*Ux z=!AW=5zJTcQCULn^?ca|uncJlMDO~G1LNAS@XXY~ICtO|qWM$AYk1xiQV3CIR}hr}9PrW2bv6BV%96^H)lS zqJ)RtE`&(st(wG>cAau3+LFZ(7J&H%f6szc3*GSkJGih|fAOEk_!_TVc$l&NUjd!y8J#1d;p$FJ9EC?%bJbC~W>NU9XpV}Ia$~nxsq4h!tx|5n2c~hxm zrw&cf`7?Rht)SLZY;2g&rN7#zFJ_4p^84JLYJ9SxES9b_eRasRBiGBWFYlbmGb|=g zttvKtx7Cqx1+u zJ-WtXmO=67v=)46=6Np*=SwiOn6Ow6{o(=Pvr5u;`9F*nt=fRQ^JqWcAbd^sS;tU%%L4it?g;di6~3XlJlf?A4u%p9p-5cX^icgnt_; z=INr=V6nqj+}MgT9YbOqqnb*k();#>fmv1x{v7RZ?7x-ueU`<`?RI7SkE>`ooe!=h zVSRCR8rGp2sd;*bZ`u2n$OxT#Y%QahiQpCVrBl~PFT>Obt3_(-s%&RG1NSFA{{UWu(dM;#-O>{YgW0){V&;j# zmvt7lTVS?HdG2fI#G2aT+wI2#u>I1`!&7EL2$seUxRPNkt7V?{GJM4>cS^lljejg_ zLAkJP!TAaC=_D|gBmOfQZ1son5*%FX{Wg9=QO%^s$r3;p{S5WS$)3rk zcS-*MUD%%*7(uuiVO5nGGyE6iUzEsF^Idk@swG^LREbcpFyI6U4l8G0SF`{O^cJI=|?$)x@Xh=AazWKLaghW_fvdh+v8My~+8{7JjGL zr%Ns6v)GSd{ADadaELsxlzyLTLqx#G4SD2-d@~qIE3FTYd`E>8a^+iWdyOk-LudK1Ry!XFJWkBk1zT)Mf4a z23=XMGnQ;9pK{lt4t0rg5*2Sq|XG5#m(a_oK{fwah0LtWxbsLk< z6%y0YT~{2Z>V;q|JC^MGS8jX|euhuUiM=UbD$_P#wF~FDNPTQBbf^>c-1YweNP4yB zNGVC`(MZBVFt4Z!eF`=uRaCOq(G<^a$3y1u@b|J$OB%z^Gci3e$7w8oD^qbq+rJ&{ z!iDL7Ix4f5T!|3)TBy$tv+OL)@xaem{2!!E9^oE-8+;jxS(yfSpY$Y5zC`F}^Z}pG zfjnC6FIjx^#bgtOu|B6yj6Ya(PBeculXTneAQvi965RKEOqC~1no5bC9waewzPCxi zzQ)wA3a>!FTB>w=q)6Q%uQ)iiv zIz3sdBg1OP*O^-=o;R+9Cy6ZYxl*HwiEi9mzO7fSmKw10%qMfEgXjW`bIhiA(gWh} zDmdv86`(el&-32jzrGU-3m*gN#3|>SmYzbzu|gFHgep zA?NHBGvh?Fe2davDb8QzP);Z+S_PO3emWF(gvAqjr#upxp8J7uc=>Bx`KvbNuOoc*Nr+D zWD_6G6iNA7mxZ#g3-1#4gZv+(cgKZ{=5sxa(?7758V{ZP-~5-4R;n+CZ?F8$C927> zZ2thcw9eA^pguglN6mxbpW}bRylE|OuLWD}-<9U?=|7rt&`+IjNWfTT8 z*r;AuvHc=X$z*Rkm02G_U4*d*6u5vBph=QF`2q{p05v$}$WL_M)aK)oUz_o6g&jC8 zhDeasDah{4W050yvXm7C&su$!cX{5LlT5&h81wXLD-uDdX# z!!M^29rq<;-8}(H4xVRI@-BGuSHS(K#g|Rerl_Yiztz72y5`aw!QLo;$kIB?ku!0H zZ>da|Z@00n->*U`QIcuL28sUwC{Zfp_5T18&p_wL$^QVJk3;rZJ3YV2`kg+wNz#2| z%i|)tS!v}TQHk(GAkCQDr|hBM{cWvtrI?5D*?%a#d(-;UPYbx~=}+xxs}ir=w#XYr z6w~vZu&0v%mgq9ZR6CwjGj)qfv`c<0U#@5t$A{O8Zd 
zmT&xte0+cPKga4$Z*V?-uPVBg{#EC+^^$T-sTM=#%0ZmwAILIuSo&FyTK&$Mluzu} zqn^7ah)J#~I&jb*?62Nm%Q|k*+fW!R8D3zYXRIe$FSqAPa1fyaX#2~d`LsE72#I=w zK>dmKZaBcb{nc-KvP3We^mNvX8!&`+Q5|emBE?WF`7$b6B`JS7Pw?OMCC- zuOW!Ws@kh-=SH|QJRW{i%#KO2yidMg;3%e@Y` zRIvJg0HiIQP|%ex&1|++ z=}+w3@;P2!*2r%4VV|slfO;9@@&)?uSr19_{U`Hkwk&GYJqZh=2QZLr!u6`7!b?=) z&tVklo6q}mK-SKWJ)r#`ugU47qu=UIPcRHzl()A#u-WFVnrZ>C_^~ZePNunxuR2p9 z-JaIn-lF&T<0x%4eC{}tFN&VZ7!yk@%PBzcv3i88{c+eFF_+~==c^Y@qeH?% z*VE@r$+b-1)85WuPtJEX?nNAaj(iUmnVPO3$z>H3q^^E@)nZ)mRVkP27?<6qWPXE> z0w?SB7Yc$KsOM{uvg$$AUtQ=mGo-hi{p9;C{I^sT#RL0;6W> zm~?P=a#QA^W}#2@YSfN}Bs0oC#*a^m%*mc36x4eSYsEr-EYrp6u|n+~5b*NdHIlV| zKRyTM!fe#%>piMVme1wqxtryNTew8ekDVX>_c#9k0Qt|H-Pfa>+~!c0Pu7LMTfhSD zX@w~J5@;jXrV4gSh06IEoZnxG3W#B|kJaQ!f8*H8W?Ff)F01GOU^YRk?$#cssfJqs z^1)O!2>XQJN}eCO+tYdV3uLP3!87EuxFM7iWLSiUz}Gs$Mn^+xO8)?vG;(&6KSh0g zXeRnlxJ`BOE%hdn#m_QT2z7nrzkjb1sf;uE`u+MiUPaja{{W@cgDjR;M{GmT0O!ug zn&r*Tvh#7b$v?LEY>oPw=#W(>)=@r5j{{c|q?hv{k#}|HQ`c0qP3LqyGYYNa z3{aHWJ$xLi{SEhR>W&vuOWPXZGTmCb5<3VOkFVfBr6Zk2> z$Kx@I6=)esS-bE)U-CLikf6iqp4@hU)##W$hJ?D~^*VtE8&|juW#>a2F(hST*^BFW) z$)z7o;bDDLuYN=IWc{B1AW&n9SKXN^6sNRDuCdr`@T{GX3|gU{TJ zjRwjVGePES-?!FBwO_$wZS#~u*8V{6$0BDobOBV$Qode}BAXMEdAS(stkph(6-?nD zuDR8UAQO7s_@CpH$?MsQ9=*l%oQTrIzM7}!GUa30ZS=Bqm3X!?V;8Rk74bD43@>oU zZ>`kJ{$*ndmaBdVMrzd5ezYYiqLHLDJ#?N`=eIDY_M6T>0a9x2?*LqmH8SPWh^$ho%9~x0+3`U*j3+>vig}dimM) zWn{mJHY-_wPj!4-EA>q@W+Erh7j%H0RzWm5Q6l zdp&Y~N}e*rKCIOKL(PYv%dsv%{{U$_ex&j!U?Y0J;9NE)QoV| zTM!CwQx7WroqOkkdvxm}TXdGAeDg@b%6btgjJ~8hjl<(p%jonw7o#Cuh==PIkGFrU zulCAH_IaP91Sn|=y$e2{?F{!f3vZ^jqM6L)-c1IG(+v%v8s{{S=np07PMYW+#&sj7EP?ofZ3 z)dII)>>?ddkAM!l!9-ZY%--9&Jh-jqc9NwWHYHC6#HE7Ly7S{io$>wo@J01r)_r#W z01;t+Ny}Bk;m+w@H!*ZK)O4enW0%dfo9SrT^(smeNv=0e+cv2~CO3Z>P4+wTvizs0 zclCL|mhs!onid?WR81#ZmMt{-&N0<0*zc?B+F%4@E@C<}HePgjVDUM~=v<$d_@FiY zHZG*5iXH3~4*hW{gTWIY^RAlz02lp^KZe=Pp$>8ue-G&MEb?={JUsEl9a+`wGX86l zZ;k$+FVr%AK^ywF^CeYWc|sq0`!?K#l|&bRu@{n9mCthB{;E1sdHk!bk1`Gp|SQRaGt z`SiNu&U%dVTobkWZCNXSP)GRh7(Xw)bke7?Nl{lfl`cq5z4!H~TRFVOdlSqSPRB2y 
zYBT`|YX1OJ{vqt;yPG8i&Z66;I>}QEvrtW)v+NZ;F(aifg%xhjg-|I|iw1BGpXC&E zChjMoPPP3FgXlMjp1(JF`)j{f5dQ#X{k2rB6>p-IDO0M3rVqtznh&*~`;Bb$GwhlT zVjRT|pNQoLgx-t1*S@g`F>YN|$-aM-Dg1Y$P!&5*E8P_kK^#2h;&zUXQJ|UR98c~h z>bdi`Hm1y?qMB28CK=dnPFXse{Wah>Y7}xV?dZ0V1v>Mga~K|V4n7A@!dv7E#TqmFiJ{=(<=9y+5C zlcH9(ZmhqM^AReSkL@xxZ$s*r*OTk7@bB1q8O!{EH@+7k!<7WHTGmm}KYd?S44ut~o2}hs zXq~jq@~%%u$V?px&8lHx#nb1F74qv3Gu6?!wX(R4exD{Huql_VQKHwd3&pT=D>uO> z1w6ZIN|+<}SAABS=lIZT<;-;RQ2zj40-6SE)0<4Io@M_4HUgC8Z}T-&8bX5{)mG8XRcfq^g{AIv@r7I-DQ^Y9*H^Ye$Jf#^}vZ+hqP zQox9Z)brE8CadIg)+_%2E%_pD7yM@asz*PIe^;)WH6Xi#V1glg@*phcXe|Wcd`<%&?hKfNAFG12;Y+w2J{C#%#0N?@steT+Z^C+r? zn4skN<yR~CS-{{X|^I=Z!|h^oF3;^n8-Vg&t?rPO*HKmZ_jKcQ-+>GL>Y&_(guFPh}aXOoIjGF4gh zUM`($kcLkQ*EzkT?+s#lbbcXHdHlhilA_+f-&0mc@>f;NWg@2)O=}z}XV_{MZp=UY zZdV8(;q)i^Iab-TO0HX*j94ytfc+HcsbD_uW9Ohl_$_g_OXZBe_Ks3$P)xT0bSj?J z(Cq#q@g6k!Nv-9+`LTWh{o8^e;^AlG{-kji6kDjO63rp&YiIgZQMQZz%-QRyKenh) zLb{z}sCy@$6~B=FTkfO3`A=!zj5>+_nJf02lv5n>wbTuO$E&w<5Ehs$!M z=~}gAO}dVhOgIvIbu5(vWRNg(d4IM`>ctLF^+nC)A1gQ#`gwWS6n&nLx-NRQ(bfD0 zp)>O3C}e4UiT0Wevd(*xM<-L0M%USn#C!x##q?J?mrl9AxpTx{&zxR=CWn{p)wQIm zWO~Vgocuz0-RoSenhCZ&TT;z!FQ>;`>Q2klSoiT%=u5Dt$=vwty=;ikqRFkDMIbV?k z-?HZul_b0rsF*S&tHQ58k_{{T9^hVb*% z5J6J8uPZ3F7afdA^YxUrS1O<>v)8kIRgbUqr6E^YJotR;DLLwN#aBD+&Gw;tyklqB z3uy(!6=A-^9|`w=?)*oy%c{Orft&k2mY!$X?gw z;_X%fuNJ9GnH@NNFqY1fnFoPO*0$5OXy_7X-^#nOM=bN)+Lu2*Z}ldizW7FYUq`B+ zg|PKi#XTmb*_2}d`SMJf_8Ph$kXh2CSu$l;GuW@GTdIagXPq+7ue?`<5plv~kPFHkLSdA&}9l=-=co>~;v>zA$2!j<$_;%aY0ML)+r zu$VdZLa1b#F0>UW-O;n5IjT~X=c}bYlMZr1T=5_0U5fhv-7p_9T-Nq@9hm8w-y#dZ z6J-&(;;*qpx$^=U?dTi5a?$M6Ggs-bzRIAAK95y(U(rfg^G{ZmZ?;w`phV?!*Yy@6 zZ6{9AkyuZD*G6Fz00Ke%zFGQw3&^;Yq}P0P1jawhVN!inu#Ie^8IDD*pqozS&myty z!Def@_g25$xqabT(498PKup)J-I>0xhK@BDBjxIM!tlWj_Lw7u_~gfy)4evh>z;IU z8!H1)l)^rvCZW~5mfAoQ^MN$D&u_d$_n-UAc)apwA^X+oiYw@JF7^e_b9yvr?1K+M zzAA)VuOtM`RQ~`~x#jeFf0eRSJAlgUO%DA!TH z_iFn*?dDm|;z`p$qW!e!`e7*EsfqYN>I4RHTrCD_0P8ioRs*_%{8OcIU0>tp6hG79 zjOcLOT0CaA4O$CxpqbLyh}UWkJ>QdO_dB`KzKxrm-9&#IS_;Uyof*EbJ0-6YoHt9< 
zR>2YOD>7>&A8CDe{7>EE@Ke1O-xRRa@cR=>EO%0<>mepg4QYRJVbq$X9m~a@&ydyr zxR4U{Ug!bu5B`X~>lYvMAj9_w6ZVnMjK?pX)5&F4u1MP`gdQhhu@pGTs?N(an+q5i{(^kC{xFUi&#l55MQ-zir; zeO8lnXP?M-Am-!0l<*@&M0el~!mKuW#JwTfqpA3_I#>Qz^?DpmoYqYRyw6WH+?>yk zt4G=}N;5hNW^BXuWha-D!2bYE>Q&EW@$_eGlN6|6?>N5Slj}XP^2F59^>tO`&N`#m zRy_2#FQH-?Cq{ekSAI%8t6EFJUC_g_^kF$ByrW;BQ8N@qG+V%AVU z=I?8>>!E?+%o)+(A7AM`uR7{iMWU+Zar=$v`*ZL6mEv>q6Mf?w?ai%paXG{izRGC$ ziHzs-+sMaa*fd5`v)MnTV7JHAZhPbWZ}Hixka|nn(Ld!K(RmGd7H6wy{A-ytNg3Ua zBvU}@TDLD;6}sA{=v}H376XFUPeXSRiFfgHrSso5OFeljS*-TYguvDhJ=ivj+hb2@YlTa+^gUq2^a(1p;FzcT@ZEE-^ueAvpe=v_7{ zv9F=zaD@7lr4r6#=1ga_`fU*-a~|rs#q`cktf%2r-S(2_oz?xJZGP21-Z#w!b1-*i zdz#Rx-oPKo-}w(;nab@_x$fpiQ^;}4>MEJD?R4A;ja?5{`yW=Kp8k*SO1UU7`T6G$ z;2&R+)0d<)cDi=X>39{#>OUlcmXEd9>9yvDCcZikno3o3c6y3YfDt=@kbcmoVT76!JkdJpl5LepEJtpWuq)w z!+xQktM&M^XeHSd)+ko8f7(y4J=@MTdlPFS1x&Mqdc)h zqUR;-=_MOK-5M%Q@D3vrrIUpePVH{4WzUV~l)~TKAgsPzb*fb<7#|Ix%uzTV)Avm? z35KODLMTtBI{yHt54Gds2j}7c0DjG}o9)qm<8%wA zS37*$Y_~m9F#68Qc?DqTfPbyDEvF zOjsWc9;ytR(u$6!4?=qm5 z8%-c+q~*ghaj6efOZvY7r3R0dRQA2-+W!D7{8#qC;?NxatrDNMSvk#m8eGZkG(Co+?#zbK>vvb9nvGYFF3k5o$s18UC8CoU zeIZl&GIy*|biIA3%KbW9)J1(ZlO4rn4r)E*73C;Rl&B1a$)6Js*9Mf`DZV539yuA6 zOg|)qGH}x!A=Z0B?KNV*`X4*}f~WW~kaDB^hMraP?T0#BY0K>sSBka)A~kujXG000 z8j=Lhci2*VBjfY=e_NmU)%W-ryYD&I^q0;%TV+BMq}R-R#T;K7!8@}8AK)}M!(ti|*}&PHJW031X4?jz5Qf2{GKJR9YnPORNJIc?;%nx0^_YR1m! 
z$#XvHBy6KQ!n>}t?8@Y#4b_loTlj}Vk1sc!&FkX2=#S3pnB`rbC{^si@Qd3w+Ca{k z{K9btRKZ~9=?i}h#%4eF9(@jSsw-bCuJMY0m!`CdmKqU05bYWK{p%9bMSRX)?>Y`- zv)5muWhp(27EVqU;*DEC%HiqDPfLCJ_Imk?UPm;HWB&jnso)APg@W3TwOjq(EBeaw z+?=#jziG7Av@KxYUi(cEwr|O+*2o}_3Fe$<*{jNeZBk-X?*8Vny6mriZ&0Qww)}o6 zUg6)%D%fOSdGxY&$@`LU9I4vM-x7SPfBX^cJ+sGHDZe>;YzES!Sy zLOu1rAJs{DEJ;?JL+DjQ`r7Jw=ACJE}vaVcpG2-7_J&jEi{{RyD zbz%}{Jt6vYI})OS&G;nLa%Nw_TBz{b1zHH4}C-oo4w1qa3ef8G6J9@lDvitMex$xIA zjaUYFVSnY!-sj{}^7-^|xiU-0;Pg7Lv0Uz@F|NkD>8UdK+soT&ciO6Ff^e5Pt(RtK zewlw;v(@ah(H^KBeX{XT8ok_PKaa-AxpKR5$h#mwJG|ai|m@td~2)F z`G%lQ(a5JQTV?q8Y277dpHT*?sC9EG9*;`ck?irA`2PSWr^@O#=i370nQRpRx(azw z^M1Qk<>JCSJ+|YQt}mK29r_;ccDNJN;k4@G?7ctQDs)VC4}wjQw+uF4v^H=q)ILg%%}@%)`1 z`iFS_Mf(hre#6SEO;REKNVlOI{{U$ZPBx^>Fed34=jwAc^`hFDGr|S);NwMSO|Ort zb91GU*ZwlZgA84e{e9B?!g(3ZsEb&f+VWBYPs&~SA8mWu=wm@|>Hb&Jk^Y}^Jf-^O zX(c4Z4gysD6neA&0J5vtJDgbm00N_t#pItgG4*;sT3cghQk}7s^mF-^bnaI*(eu5! z_4`}3^Nz&-0G5H$6|g#VCrX~BNli3eZjO%hF9c*rZ6v=Hq?Dy96s~1Ng`z~OJF@op z7eLMn^(=h#Pi|adN%ehn`sF0G<4TBeq$&R0-FU4eWd6y1E%{FVK1_vf>W4ac(5hRS zq|Wl_^x+RRyu+DW1imKdxe_zTDqDkt-s|!%E)@umjMaM8`;XH@J6#+nwqjw(A8nKa zmkk9&kvkdo=58PYAHexj!>6Ch4_}mF^~D-vmxcPw`_* z_0#KJ%aK%yv(kQvpi3MBz`hUrupOm!y>%~+)c}pjnPHJPVyy~9DT<~jV@Df)#60A=}O=Fg1Z=V$A z9v&CMf_9xNRRu~KscC%4oL6X$y0r|zVrpS?DxGU?rq+l4)%EcI0Fli{<6QCm)4LR? 
zpPS9ofH;v)cU6pil+TIR!TWbN#R{gXN^kKl=<9dE()9Y(6;%a47N(r>J6lY4$dcLS zROy-7N;SKi^^if&v(<-0%n#E;l|E0-HegW`8>~Z^%UIks05N{of-!&1e?*3crA6m5 z#ZA=y!6x&q;9XsjjJ;B0e-=(#KT3plXvuj(fkJk7;xp_!SYs7ytLm-HkMRyyv+_=z zI`Zxq`lps@XqgeQaxHTq-suIaz+~Sz6oEL~tt3mu>Gj<;HpO^s>Nz4}+^&|MS2Yx1 z&!sUf#nB9v^H$%ZJ2c!XU(U~174Y0yB-ULs`SG{2?T$V6H}3xc zncFc|qt6$kWk->#(trJO5|cCzjPSPUQ9lKIc5|wISe?d~s`*8j?-iRY{l;b&FVmhv zO0=d@8HelrP2J7~y(acx7K2uul>MCKz@{e6tOl7Ep7j78CBMjM{+-3)P%iEp$$cbc z0**IFtt@V8< zo`t#(9EsRo%I8Fq(#lz_Q5ijg?^><=l~&y)oerH{D-^~q&UrrPn`}yCnn-$Ftp&SN zA3!@4l0$r5C5vCJnfW3Z$t99$Yj}q1PW=v?FCX@pCWEUoa-W_E#G1gqn?XwHB$H9~ z;CXWD>T&{j=bD=7pjXt}Gwc^OYBg!~!-?}aaof@9jA8ZY&~9`801_X^+xY(gqGyK= zj`Yl`fahtgG5C_&TuSJhoUsZA2E+D_`+8g82ySPH?=&F9FR@NjMU2FTyx1m9? zK-7fvMoa#t&b11_(6ov^zHjT9ZIr2;YDwJr29IS;mtNd%%&9(?M?J$iT}EAqEj2?R zP=9iC=S;_+`1OYm7I4V8;F`)XWnQHydo*^HJ|T3~Z4U4u2Mq;+%6na2Ed7D& z^jSwns9l`><+|4~zhk2sqN^1XkK|^dXW{JgQ3$-S346=naio*f51n*d(CBM@swS?w zu`u(NE5(xDsq@$c^X)3jJ!AULhj;Qt^*a9mkxMN%yJPxN@E!Y4IaYBI_0US_O@JE+ z+Yi@rho4p%G>QIw{{YCA8C1WJc_*oUQ3R^Mb}AehAazV|{{WYSM$?vs2T7+mmzUOC z)lXR9Wll!TI=Xac*Ce~#1!7HbdctKgL4G2K6nG6i`K zgLH^}Zr|5a@~p3_J-K+cc1GE%YF3M)l*t@zQEngu;Y*C?99_ppjtTpPVfvbm{{ZDw zZxrQ>*xULglc6X3gnHJeY+C)x*j7=_upu0FB*liZ7V!g=&$)2M-nUcE&9Lik*E~Ge z(hJ<0Z>q#Q#%X6l+2EyWabBU`b0p|pBG+8~9&A4U04{!X=8?%Ry<~G61MZg41Tg(Y zluc7xj_Gf|3zJr)T@x^Sxro;dP+3pX3qFn4c??gy5CY{t`@mYq8}k@Hug8BHY+U}l z-QIyi1x)EC8Tl&cUgeBd+y45hbjDt2C3^khe?n&-DFgafbEdi|jV&oJkDc`^vQVM3 zr%s%{j&4dA&!)nj)=uH|Z3Q>hOPUsRxU1W#Z%<1fGkEUmTpGLVGrku&Km z6d~vGkPJQbFH6n1weETuj1$w}=3kGGCW*P+>Tl+K<9G9}r}=Pzh>7QANV!^N9P%k->u)+LUA{u!71mX7Dv9nybN zXj~nc*)KJ$SgIuS2QB$!T_DrK_DziF&yHxi#*+bxVGAcuGONmDh>0TKjCfO-KmxxQubjVhoYp@&6~moJ&okJkOxTOtrTp03 zRSe%k803@JI@A3nP5f`lZnDn1Fux$=k-gXHQjh90{?Csu4J0e+`{`B%O;iIEfC3xf0-VRus_@JgGOBKS_el4C41+4TKNz#} zu58HP1;^3Z-^o}2tOU@>EG_2`j$v09wb82@>Y*9Zb)PQ2=*nB#=tbG+dUI#Ghg={p+K5LEyrV z?%iuGf2<2iar+KqcNL<{+y$ShWM2x*AM^TRt=HEV(`&XAu>DnMu>xu-y~|m9Oq1|L zmoI$240C);YqRn$zDtNXT%HVFKA)mZ9oFgPAjuz_{&aUiK85Cwo_ak=3n3vD=lcv< 
zpp{Y6D57kAg*+`~3pvc(fd-Gb_HSpeW_&@^S3Z2J*%n}NcCeZ=Sr54 z&qZtu{{Y)&Fl$PR=-uB_ZFB1QH_*Z?5Fg39Qt@BRW3b-z9cr$U>6*i+G)>L>8ihn7 zJ}d1{LRlL}XGim)^zc9Q-;dPZK*#$EKO(Ynmzv&Awk)s1DYZ+dYD#Q8sS&o`bb35> z@;7=rbQ3z5>1Rnh)7$!UAm%*Bnz{sF$=+P_Ph{o#NPd0PDmhFC zQTXpimGu?D(?44CI;Z~L#Qy+bnEBk(Lrv>IbR#+!H52dGHYJP1s>G6C^@9MgAGx68 zYRt$>Ne}Y>0G$DLdei#t*Z9*VIYYf{6e3Xn9wXFHOi+34SpsOyC%YB@9yHeQ(HV@kqf;|zN2qKgo z+1KetzZ$~kemh@1bTg-QH}vs|BI!gtHS-fYpnh+mbh*7wjQPJ$M-@Ajg0QW#ms6Pp zzqzwlGh+&B{jV13!KfD46UTqOE+$M4!RbeVit$c+E6pFsCG6_2pPI6TyH_)qWiBFW z8dZa4>W{_px9XABF`zM^zb=289XWMB9y=tF@5N13ziMJO#{tmm!}jk#zLRD~9%jKu zm-AP%29V#>-;VR9r=R%uNji(GX<~P~km72gZl^qQMNCuYrbYlgWPW`#q|J~?($Hlc z-Zvs!El)eBNazyc6$aRl@ifX)dP0*wU~MX&C`i!P?nujo*pITR9?~%ai8} zzDh}cm(S{?E?f&=ueR%U>Fj~pnt3Lf?rCSQ>$!2{^8l?+Km3FNt#3|VOaB0B{klWx zO<%|I4u|d%vZa4hQC+b97Gff$ULQ! z7xj|y7cn&GCsqBptqpc*ev|m$W1uOQ@vxy>kEp@bK@0x?r1(?*-O!4vQEyJYpXjIh94obIAprzY&vLFR?bfI2xj;~Pf7$;4SoV=8 zy!CsE1Z}0 zxwNnU0BRkWg&_DJ8s6$3mQ+f#9>nG{c^D%8c+t>+Za>^_d_hIj1(xL@cIT5Z!WI7j zR={rhs`iiZ6${Xnk+?~kAGyL*35J?4#;jCc1%dGNwI2)5?D%MZfj{QJXsNST*dAA` zxamR5H(^(JbgcJ@PXvQY1K(X(win8h)tz32(4<7xl7AoIy4b3Bc_+-~v_7SIJqv{@ zct7ki3TPQxY8l$C$mshh#|%RAnRk9;`A0!F_dhFze9Y=t323K_+Q}nK+^DcIK-#2! 
zY$;Myhoz{Mc`A0SAHbqfSG02yuCUqDx2(+PL&aC^TqGl9T67jOXum<-|PDc&r`ZGJ?WsyFE5^R;gu=(qNP&^TK55S4!qg z;b*DlZxXBwz2)rr^33_gQ_PF!i#+xPXy;~E?z81;O2}IY&?q5WoRf3s_BnslPgl-< zH&<6-{G+kdTRHj8M_m&3vo#{gE4Os)%^#|~p6l4q`RKe}$@kmt(Ek9`BZ@waZ}{i& zVT5+Xub2wxH2Kp_OuYW3kH#&g-+wtSwZCogDw&*qq<%(IaQPg@MZQr(HF+mMGd$vT zb(KD*RX#X{EeKtrl4!6^qw>1BD&$kh$+|WE z_NYC2G5lM{Ze}pl?rzezvYJ-s`7~u}-)#Q?rFLo&ug|0FpV%9|Ba3pLUbkD*@m&SQ~W_ah%!#}8ulsmsBAg`TZV}D*5x#5ALUq9zCR6-iBEZks`83FKPPo%hUYx$rxM*+H!{tLG`>C=o8VhL4ha%g)Hba}C%R`XhLbsoZGqnUSuPBPH4gh3KCe`e^|02Osj($R zwPrlW)GW|R=!z5Idxk5UH&^Uxk>X50gbO?i_50dWRm92z$cgTfmm`(oxkhv7Qgy^Y-cC(Zo4d`*P`Zq$sQ2SX%Pem^0FK6<{EFm$LD&vtjTCawu#fTwjy9gavGH@8hqEP1pZZRR zC}-+Pz_X)Y?ah%(jyc-qIf;gPEJM>0r{5)epF5-3KG>!>tf#>r1kN>FndaxOvCdIn zkQ?#OrXnx4ySgq)X(oj(^)@01i1(!Q*Ys>2!VOC*Js?0S}*A zTH)lTq_NU79>rQ#CHWd!6IYzNyB8>7OQk%P75U-%yc!4+XikO31A~siSNKpI;;`Qh z-~CKOzOk}?cMFdgH|w~V$>EDSD%S6rE7RCo=cD7viFc_-nBesOr|Bs9%sC&;WlWvex`?6%R}8xzzzdGAN^F9^kQj>tZMmW1b$MKb*+oTbvH zwZ+@$DvmKBQ!Y+QsCM%?XqGE7E@!TF^EsT(Jx*L+fb1(bRU#{79@5*^HN@0B!^j`(+ z8tHcP^Y65161zUs^Ne%rBgWs$7_jbTpQEy9Zd80MmTDJ9&0SLQnf{zFV;NvK!M1B| ztx`YzQ7qQ`6R-UTCxEKfxK%G!#?+z4b~BvM6Ht07Zbq=U#^sikU+fH^->A7y?Q(d0 z#PvG;c1RdXL?}l5E5{9#va3{&^ZW}GsENl8HeydcwSgKJD3-9 z_|^6n`F@O)rbPVp>bQVpS*q_==OqTOeH1WszCXEBHA1s@;UANCic6=`3F=&EWbq~I zDLOAi!rCZ$;8B4(yy&5_eho9?2J zG5VFYLn^10y^k{m$TgmzQdovBG3wYNjx^oVTPy=w1f62%NckTB0M44#jrjGfv>8=x z@s%v`erIp-^|Qk{w5i5pzXr3YSU|rM!~Xyypz&Xai5`UvJl`E-dZLBr*2PHbf9*WS zh%pmt_Y2vML?)Qb{{ZrRshxxF1JoXej+4L3OaAYVGA~|_`QMN1AD<&peC7cjEVTas zM9eQB%=ye8A^B!EaIk!b{{XcB4c{Dg7B9u#9&6Vu{Exr~*uD8*n|E~mpX2=L0?0Sn z^W*&Y{QUm_oc-N7zi08Dr#|s%{F^dMsaGDE*QbI(R-XGxq0BsF1`33mT(g+Br z*q(Dt_Y`dC4x)(|#P8F$F>08vARE8q*{B9^}=>3mYYdQY_ zyLMJdo^EfME^_*Euz*Jz{{Zo{8aAJEN&5=fJfgKH>PbS8>8hg40cADjt+NV4?4F5H zRVxx7y9eYE@HxE>P(psRbU)vFJ>To}J9kCOea({+jieE_0{v{)R=K^hAjNl~yTmCv z3-vZnxAi0Ipu_D-nTOOS(0GD3#n-5O{{YtOP=Li%N_sgv=_jSV3fQiAXaIPgd$Z&0 z)Xh<9sAVI%j4jseOlyLwQNZkUXs0_B!_c|iDUX{DbblOAN=)Y_Uj?z8H+`62mV8XU 
zvXmAZ?ryD%O{axai^)e)MMiWU8NQ&Eiw0{eurnA~cnKEi4$l>Rwg~uC(ky56yoF>zX>rHxB32TVmb*2!qa}{Q$=i!D{)}(eEHM@p zN%eef0sL?~Dtxa>JWVNlFQZ%ejZt&u;ES*qWlO|qW1+pd4R-3C!d?jKKeo7CD0dK#lT z1iUeKI&a7&eRKppg!?-b^XM&eU{`eD@QC{q5Qi>2h%;$QFru}Y`biTxlYAMDm@>cb~Y@DQ*XBhbP#~WRD*TXp7 zP5EG=saqi(Mf4?hd5-L8zhcxGJ)_o(@t+eboQk2wA%uOGzr~aSk34MOQAHXC0 zuI{dX?cEx=@pO6(RPsV&NlwCY-5csu?&%9ffWQ1-uWfw?ul6nnSSCH9hP9oi>*h00R$IkfqyF_!($3!r*JdeXa=i~Ir!g0Buxg%Jhm*$EHl3?@$4+vsv zkL%(805jcieO>wc4>faUTesqAc?^|*fBJz50WaKIpPJ95q{bZ6v=+*5y+~p;-`R@H z<=i=Y?@u~*7@KbDg&g%))LEWMoZ0#DtsS*7n_S)}A_siuVg;godwekuXNuMXRW6~K zLfHtE=Ygf>lAW#g`p|@G{@!^9=s8>a;4*Bh_SY0#v-%CE)T(pU_I=f%{{UKMV3x*7 z@cBWdfT7NO)k^ephNhIEKQXhd&c?Z;OV1bcOHr=b!h`v6s1$)2m#p93mNi`(p z$LKS2TCb#IVxeYrS<34Djh7k5c4Zr2Y%~Y1h*LO{mCyvV#)CkLb-PO?9BKD?@XgbI zJ~!I9Klc2!p0<@=UZwI8(fcnZiH&SVrX+8%fbx;sto=nZkaBUjaaOp)FIM!rIOe`44BQcQFrwRo66JlL=||Q}a2nsbwho zhxLRMA7)L){eWiv6&Dxr-eftz3D@r zT9dJDm6IXzk%c{f33j8(_j3pRq4rNtKk=`_Eq_@3{O@7%k@W>iQjIfz?h}1NH+q@% z#z_Us(WTFB<$kmMbE$`eMF|RNWiCp1{)hDCIh)aa}BMpY9ju5>7FiMn?G*Y#xV8)W6nb7AzXIE9+p}MDDwHlZ- zR4%6CibaIoDfu6Ox1IS1cEo!M&#*lnS2qj$Po>my)6gH_3U(?C^=(J~wBx`>{+n(^0OmXo$VqBN2FnL|-j_EelQ z78JJ^?o+$t<64KyNw=rTetnAh8JoPe%oqM}h44!&3!^{xnN-{^LIp`N7Q(o%RR#%-=1|9ozx&)){Y^XZh0IKc6p? 
z(WCs2nTmJIFn)LE3w@ct=YDtm=HccR{15*CYvYBg)iN;c@;9e52Qw<4Y9+*t-<=jO zJ51#@$nY;{D>a@eOR)KObQmg=mL<`8 z$)8Uma`U6q{F&dH!xRQCE|+D-_CRfBFUe67<0#74eTZX08l~2+%osn4$KK>JZeLC> z099c-*)Tq@@9 zSNn%{&Yh8L`#HKf*VgG)T+3TRPZ8?+WjW@14d+sS`pA%CWO>_A2X4!rU8?U^*7x>f z^~uos#!B_i5LqMAv6vPCGRw_$K7gi!PYjv#e2;Fc0cVxbrkn>a3^lSV7L)-BsPQ&| z{{R46H{RbReZLlmuUkNw`Lwh%#&$5C&EFj#l?nKKkC|p}eue)4l3{+`N+{Xnijns= zjPTv^WX7{jq+N^C; z*3sl{uU9(U?MP7i`Tqcoays2V#J)tES38$uKWU_i{{U;(&qPbB_M`?Ojm(&TZ9lTP z(D;UTXg)fEgj6au(3aytsWApD)Xe!EQv>7RgLE_tGz)%ye@$%bR@}?Eu|{QOJpTZ# zM_u2r?L_z%c6g>pbrF;bNK!%D-D#Cv14FP zxR$mO73GN%A2bOsT)GmXW@`1?;0?_X0&k`Gj*Jz~M7M9)oSrJ#y%{>{qxTfhCtouV zqxdmxE0xCWoO=HNlm0Tf>-=-(9yda|WyHnaa?vD*L2@Wq7Og^^qE@!03qs=`d3RQ zOpG1(SB?JwXzP>feZe*TuY94H^QeetBq(rQ0%oCn>p9!d;!#P?DQ~2?tLt+=B6TF= zpG2)BOEet6w{iuHebH{VRCE>j2CMe&&l9M)=;7A8QjF=L=xqN0?|5{E$Yl2(4oylM z@P_S;Mz_w(_(AqLlJuQQH$CJ(OdW<#iPA_Yw+vQW}LeKe+q ziyxQ;>(GvGKmL;dDKnhitU46-;0p7v%&C`jWm@BFRih^@EpF1~{fQku-D^RrX8W@Cl&{Z7`%=yIq zl$G)PnvthR{%U;L@(J?aAR7hub*NhK7>_jZ=UqAck>H^H=D@)BP~2EH34qvR7pQ%& z586Kv;oS&1(Y5sS?nS)mc3PD{S^Ic%DzI3#u)MK-Y1QiWm2?2S#kE&4%g^sRYF|I( zFmq)}y4BAl+oqte-L`$0TlKDR%@{8BOF|i6wR@4| z=tTlj_^ax^)XlEnRvD@P0G7uDe7?c{56o+j?QIf`HS!uud}!sP%omPi*R*WQD@Vl< zErsP^XmeuxY5c*lT0a>70F5U&!RPK{)uBmaJ{(`wfrbLKFIXY!0_LkXY^R#8e@RhF z)Ng;KCfkl^IT1VOE2sur4C*ll%71P2u28Jx{GP^J+NXglOP9)t(nlVg(boLu=Dk%H zr+z6Sp>NgxpuQx3NC7{7%7Uf40)y_2Yhh zv)50YE~mk+2a$Y>06bYYyb`%UeiuO=VTk%O;Ylwc(N1%vdwIoIBN+2g8kHX|ZgjOZ z%b|*#ys;l+>EkrqFA4lHg`xUqKeI#nThem*VAh7C-s1YDnbG>`*i<^r$7k3KPOhBu zrT+k9>JQ{~LvAYM!LH!bHNUv%LS@u|?6NM8IkZfC{*(<;KIdZPxAPvS<8dM^osLXg!9I^&*q__vMETE@nUY9WbiRcYyWtt1_#w3kh9zW!rzF$ob`{a&~BkR=8k6>q)5Y4S>=ks;bQri`>`OXXq5|?5ASI)So zSao^~{Dbj&Y7I~B+=ty@RZw|1h=XRGlFlIBiiK*`l- zd-2rWZ6%(6>O~aP^t47|KeH6XqsB}@zNcqVKZ`q%J?-uCxtK`ap}!!CUT}10$$WY7 zGojV%Go0legOM{U+2^55No^mxeizl7)Q`xf<8@>NzIFX``v!dNhLMo@r6r@z>R1r7 z9@j5DR1BSt#O-)($3QcwOg@aBNu^w9rV!~PKhzFysk+u9()%B7pSEU_^O4hy#oV`J z$-g3UPPOJ%+2j<~%X@!cUa8!WoOY^Dc5k?UAV8JBkJ9{;@(;)O{HdHsj-$3JHWBW! 
zb&{=ue~WCb%7%7BAU0-)nrTZ}&v1kf?atj(sz&-Z(d1G6RYK%wuRBrw$C4tS00vsJIE&e;=8Xj%=()q#M z-B*rp{$C#t94y@-{#k-8%>6X~0Q(5w;Cz3_z~TJ-{{YX={{VVl{m=RN(!$|#_(;CK zB1Ii~$_Z*ZPO5_RD;pT#(9D_@w3NSJhO?vRUw}$J1J==V6}>S>jZc-sNPO#+uA&>; z?o^0Q{^9lZw#@L=*pE_OpvjqF)vkx33C%xDK<06&Gc47sTVrA1YpMIP{iikKfRbqB z=H2{bk55q?yOB~ENtB^CO|#s7Ut7jCEY5#<4K*{_SA+f}`ikE(YFzHMrR!uvWE~OY z)WbDrg%5nOx{Dh)uU3!ln3k3Q01fpmbLVap-9_ilM>DFvWxvUmP{gpsHuPS{&39I3VHNr(;DSW3F=i$lvKjQ^fk=Ep_87nf4wtGUb2&; zi^{-=J?YO8FR5s+D3|1aiq8~#tXY1BoSSJUOY^Gu0u~U`0iMNZ?lM63VNt6MZVqc% zrja$a-^)KGucefP`_U>*zU`x^({rMWjNMTUb*|8eV0}7DdJdXQB;_Odm_D!Qu7P_Z z63m!r!1a57;5YvOqoi0vx%^fBK1b*DNngNWH~hW3@S@+HFZl62`QMH4FOTz?pC|GD ze@V7Us_6B4xY?8T)EQjlR>>#~T%%gvs_tu)j1@JMh%AgZu;!_&>-*mOrZUDaID{dDVRmgv)u1E$T=#qV>XFpay}udf1QXuUwG% zIVuG)9SAS8qtB%kWRFx<){JXKnk>hCV_O_L@L6gLRtz!LG&H?snH551*43Zw{Wj6( zYPujkx!hU0U()BVRa$ksh9YC~Gz8Sn{{Y7NqQde&e<|Db_vx4pYu3*%3KQ@MA4}Fc zq_ibd%47$(_4zMFy{Tz`?2YHGJenC*Ax}YO5j}eqUuq}w>#mj+-qaka^deG)8P3h@ zODk2*;`H*1I@fXMo@QLyrkm_lpG(Krrb^tXrzG@BZCL*1osndjPJs`|IU67@Zht|` zn7JJAW=!Y(_YHYaurpOJht;j&3gW2gQihRt9-g1X`V0L?!%@rYB@kb}p18xqs! z6BWO2@0knzzuz~3ogB_L52F_+y1r}^-_c|WTuY0*o|TsTGJ$=xQr>$LBzsP* zxhs-SR7h;xg!Cw4>@J*^QHn%zYNQps-Aar%{gsUW0OLB&p1J9whD4aT{b}kMTlRH- zVXCbZs=t;GnWy>Uo#X5=-NW@#p2WKkPtU@KHUn?Oq?HM&Z3dAxV;0Z3^+vyBhv(wN z(AG1oj$a3+@5X8NQ^&pm5=;`hzuc-_Qe9TU)z;;7dNEfYuGd5zYi6GZn=_yE+0v4a zgnNjUiuY7`&Ds!ZQ8tqBO&xeSew4mN&1aDe2y^_6mRlmB)qOQ~ceub^1RD z@Sb+DBUtS#>2)x7>rNQMqE>8l_Vo_ao2yj60MM7OwW_}@4?$+sn>q_W=Dj&ig!u$w zc&ukEaz|RVtxHO!p@z0M$#0mN>h(+^!};h&cKOQ~+(JtgeQ>ND0=-jAhA>Ep}#?vrQ-^q?8qjekZ5pMpEi1}{k zeMV&OozpC^U6_A~L~Ae7~fLa9W_)0!Xbr>uxYYPJ7~{ca4i>U?=j6S`oVX^TOI0`_ zf*F~JB&-Y+r7RXXpB}7G=5Bxqi#q=RloPrfgmM^l^Vwac&dMeH7V^fH#IWLVs%FbKZ=Vy! zXLKB9PN5w$|Gg`bgj>75&8 zv{b4;Z-QCZif92wv#x^AFZ<|G)FO%V$_`Iv)b_s-uBv(8jecW4is_l+hOl{kZdo^M zP>zE>rCY2K#cYlKCF|u&lb#Yt7C`V~n4SJ7xWUo+QHQH@<2|^wW~rJ#2jmLjaNjMJ zzwV z13f4@*FeexHUdTD!TLVZ8Vk^#x8=J?j=AHxCp<&w*#7|9@LCH00Ni=@nEm6M&J2=? 
zv+INWik`hiMg1P1)pL1KH_Eg+2^s}|AuUiG*Utyn@%_WtyOaQoR;rE46clrqUn%=D zxW2c|ZWri!$fCNa7?t@2OnN#DLYJ)h(4ik!_4y~{P$&MQ1z&Fax7`XnDde`+n(TS)nQN#TO8d%!0rP$9ec_e}1pu6E3cx^lM4+p$5> zqdhk_?Qggk+F1O1+D`h3O7WPqeOb-pB|i?2DCkEBxgu@))X4^|l+@X!^B#4KtzJRq z@BAR+5k1J`Vt3c3F7pR@Hie`6Y4}((em4{(WrA$@&CO4@%RG@q{F(v@Qqw{hg8+Nq zCQUEckKC(Rw$<+iQ8=_tnS>Y~)ff_`RNCH(5ZKjy*fWpgGgOcMxE$G6s;vIP=1DLL z@t;_J#jSShzi0eAwLcSq!|S{EdLPi6iDz>@@0+}CHA?BZ(Wsb+Jr10#Qn2!`74!8L zNOkLG{DnJbKK7!{iv8QHo1*odUe(fcF?j_Z{HFY!U~EEwr)Hy@1+3K1&!eBsj)YF= zQV*p?@TnZ-Pd`FP{jqZFL*#t3W(L2(N&yZxESkkP1fR%2o!5iZkN{ARB|5pCGfrJn;2-=kWgkk>brS84zY4R(vnMj z8uL_#R9!D@_F<^!-)UoJ=z!xF?FJ#+e=U6qIWO#eJbO7(tDCdG{gucAJV}9IG@n7x z(B60VHY!LKx3x(@cDh}Y=Kdz3d|cN>(y4ZKAr34n_m1Ay^E?BwIMvr@Oc6pBzcxwf zQWNW!@!kaWeoDtpVNgrxbZCYo=K7&upYFWzz*{{v0{;NCeaW6~iFucnhXc3At$MUc z_3j8sFr^7|bL^S{6pOdP7}5y{fyeD5*|u{*UdTlH1#(7+r_U#Q*)|Px`m_4=pX^KH zzC;E<$1e{szH!s^NgVz;(h0YNC9eB70zGt1cVzQnXmMzJ5;M;$gDGea*mIB@H6K+G zK@TZ&ni+}_=A{Vw{)$S=Qpo~*5PEW(+SPd(E$cO{Y%%1|(*54(dNrKdhndWR z+N_iPG(C-L+_$?}T`cr2UW?KyqLt&(S#o*^x>ykTQ%~ELnHHbs4Ieb-s8(PNLF2On zRBkpQfL=LAH}R@k{{Y9iUEkc`E)}uHGq*Ma>nd0Jj#@eC=)mN1lmv7ABqt>@6J6;; zuexXN_=#Ky{>{`(PR8q_q_^{BA@*`=W(0(_O85w^mLo; z>N%7%7|o@N$)%M|*Za&AmkXEbHCEYK>m?0!&Cf?43+y%PFITaR{>P4Prwpv7lGLz^ z{hBCW#`~Fz{dB(}&bK#)+C?*D=X_Kv(+YxmsJ@jFmOJ!hTN(iPneKHd3h5zChRMD0 zmkulJE>bzjY>_;1tyKLsDE|PrXnec;7Tj3Iw>4{;b~>GJTK0K(3pI`}FZiaE{yFdjf0}hz z%8z;WOD{F1&k7OYQ$?H#3K00v&Wnq z@iAzxXF`fq0hYN`M8x?WJ}tF8>;ms_x<%91mi=af_xXUSCm08@@UxVqEGU_9#o03>Xh>ng-2&QYovPmjmDQUJrM zTG2oaQftVhpPr6G>WM9-YnXBNmVb?V&UXun6E?X0!(ps+RFAH&SFL}Fel-&Y_81%V z)#)I2?r$AmlR)xuP;)tov92E(=2a~x9;H9k%7vTi$>Bhlcy4L42Mfug;=N7}wZGH- zh8w@zgu(rE<>9?j^lFg)txa670I$R5Qg^T)>rS`$IK)Vny=LwfhKJ~XAm~K?dEI4T zY+@)v>FGyLr>%pv=9?P2$i3&~qeH#5E7?1gdydl5-yAJz7U>u9(G1U@^Sim@{(txn z@L&BefaIcWK9^Z|01&9&=+o?3(|eVg9|{fe&E0RYgnBuf=x#2KBz0ULS&>rp#m>;g zbbU~G>uy#_DB;GjBZa*XipW#zp%~9L8~*?@>wa?1&D_SX@!!XJxH%`Yof>H!JNOB@<*fjJfDnF ze?U0>56U~#*GD-j{{X+7%ZIC}=q=4q7z39dMm~KqR5DrY`&9Kich$_dm)QRR+xb$j 
zv2ysLV$W?impVNTp<5PuXhlB~S1VuQsP{jrKgEla`z@DDuDzvosWr|YX33xpcF!(- zrb{J1dSy+SI0|H%uBxbM7h{b}BP4*UlsdE}S7_KK)^u3L?I~@hS%Z!GzgyZqZ&Te{ z+xqD9Xp(?(u4Krp(C?QsQkBT-sEQ{Up`OvCBdA_?Gs zQEs6_>OTe!s&M!3nfpY?`3pTxTmJyY{{W5XK_l^FWzazSI)XZxX1BLARdj!^*4&~W zvG>)vrobtHm@PkFfPd+)3wi!sdkc}=?a9;H-cY}&lVpiC0WLmssP(Px!8Tjv@WT3M zlab5#{M+U~Og$}P($1Cgi0`Bs;VENF3PbjhZi<*GMG>rZW6!qAO=SN7uR>5L!6`9g zq5=0=Au*rH-oLZyk65y3B=l(aYE;`Re5_W-)(>jlx)(z|L-Xu&J-X8I^T`t3os182 zj)Gy5`J5N;Kbu!wPsiGSdinGNE$2%%nvyT$%t~qqD<)QHch)L(?_KE_IyDD6PI}kd zZPn6Vu4pTg=MRdjF?$r2Y%d<*#vu3e3LcEJqw>ky@tJ#= zIKk*#i`y+xp5rOjI+6vVKhwV+{B`m0c>Gn-Cui1r1lM!C1NQ!<0KC<&EHO{*Gp4V# zSfq9II+2=A9EA+rYGEoz@(cO^l`h4pl4Bd@I(B)f0nj%#)AOa;BM1+(=QzCY$NVph z>dBeX^iw>1j*NTh=E$)`%?AxE>DDY!48PKfVv(q$+}x@-qvs%{F+#4+g*o<)glWj= zVSgM7HEW$MEeqI;*#d-oqQC4%H}bCizx3|Mk@m9rr!I~nJKDL;3H7aQqa4P|rjG*# zXQ-46rD-fO;L)70!8m4z=*L4N=i|lXKy6K)drQ4;vB-8b>CDaf(E-`*{H!GYQ_oDv z<4V-jXF>pT<7?_MX4V7NBj7V7W%e`M=|c^6@gFpwjbAdKO8MO!a$@RC?ug4VzL!&H zr1{i`*My=Dj8w9sW?~q*-W^Z+oDt2Tk(!#SX`YX?pR&QGh`mh`U$utBV%W2wuU#C* z$g6ykm}}LVxl4YjM8%RIV>C>-6|muj{IoJ4Ljwp}t!8 z{fBSxTBZAYvzg3ly?_>Yceu@Fg=h`#g`$Z2lTXt~c%~o7^>di$!q0nbySA zHZB`JDFlbgrxTywy%jO8NlvZ8BPlTiOzH9~x1ra3DeD@+;fTo$CyJQcgfnN*o!Knva=x3(@MPF5hKlY1Fr#RxWmX?rz zAdhmM(VWP;t4j!EQGa$&EcSO8s7}eKFZuC2W2usHJqh4=Jv9 z?p(fK6d7$knyKe(^7ZPiVJ|ycrnk$%q{eeszSx&b58BUq<^%$xK!P2p~^jVw0{x7cgOB1O546f>9R)_i8Iow zA`b~g7Bde>U-Meja@O>xQU`+Pri_>SyitMXEHLv|%hgpw&5?Tl0Jz$|MpOvtd@miC zLe??ElPfpG*v-rlCvFDL#np285GbLB1ub(I#G172bdyH2Hx^6%UcXk!gyj1V=8dD4 zjuqVg-{qW|bJuK63IaZUBjU+AEl=>;Y9dQNZyGeGu5W!si(jVaNzFvjkyTmU`-hP> znO;~AT%Hcj-d4+=&vKAD_(A@ zxs?w#4v6&r${kU3$;Vdp`Xur?I?uP*=1@Hr;;16uV?`{_=Klcxe=*_RLlM^43k>VC3E1oka8fiK@x`DIV z+CD_wYB~!?TeVq@Z|wd+-)=qNimh`Tt}?mS47~SB{iSnQwdX&%)8?1s5lca2bt@@V zZqxd<3QDSee&ZIRnxWa|nNLZ7UX#xQ^}wnci(S>`R=$!<6lbR+m$KPep(&Uux@!q7 zOyIzq_WuA@U1n&od6xs!3YHpq%t6R3eshL}bzXAi&S`$by73YU}g`pJ6i5(bm+3i&6B_F1PClkq6aqq_MtOAVeyy zf|ibW{#*GU^+o<0IpBWl* 
zb6__X^Y=PzTkKz@ZU+))NKt>{zlwJH^Yf(=ZO?|DUnVc*;OG#iN6Dz0MFhFMBNvCbnm=w_DG#f|=@PQYI|_)c z5YMYj4)ov5qH|}j`Z{HB6H*KthLBtazqBAK8Nq5b>eQ_V3X?MhlLvtNKY`F^v!uL@ zc#PW5RpFD*6Z>2uQz)LKg!`B+H@CvS_NQNt8Mp2TsdAdL;d;@bW&L8W*DkNP`QcAn z*1Nq1MvNv9bGk=5yd1%nkFC*s$Q99{IWBC+68`{12x>i4_JzoQFbo#Pqv_K3DE0Jh zI=0V%^C~?UXX?igCWZyH48Dn0wf$DsA1rqGN_pg|meu^oe)QG5j}3Bir08P*0G$H_ zu9Ru<1+^xWD@$JSG^+#D4XBv^09!n5T0;VPSkFj?Bc1sDPNCOwU}@0Ab3?ORDd;Xr zc=J$4d@6X(l!cWs{)y>FU^m@_1R2NeNzd+940jcwbQA^9SCn^Tg_!IjSE(q0hY(A|af2s$fR@X%A4;0o0t0Mp;Jviy_s%P)3s zL%VNtn)&@o3Au{tsQJrwZ@{u{gd^{$_d&DDSnnxZ47c^uv$KumrW%8xqPA19_)^>k zP>=x+v=s@}^{8S9qfxx^LgiMVF8h7)^i_Sq6$0HT%V>^z%boO?W#Wo))FO=rgMMiD zy|=enJ%#xn<6s=BYOs7Eq}ls*zql5)A4Xc`Dw^98;tAuK@SC+}a2g2#9(r5&W9EaR zO#F0uJ6|kfn1wUF+Hygwec z@5k1tIUf6KWian5aeYM=_Z_zMSktdYj%qPU&1?5Y-N0YD?oV1SC3@D&)I+fo4dWtd zPx9&TNk1u9)n`G6C}hXftK7<3g-oQwHrL7LelvPs8a4D6C(gXrLGn%Jz1JXgN`>k zQ!?pL-*x;k<=F0|)8aSmNU17gp=9TvNA~wfBs|k%>NJLks)}htJD-kQKja-{N@hTWk{p8Cy|IqBm>eAvau&R_PhZTgf|p4QBW!zd!BJnp3;r6oIx z+PbmIzG5y=YXXX9p$Yj~escl;0A+yZQJ!G6Ot-zw&#y8}{R2IV_9LO5H9IuJM1`WY z&n(JOd=Z;;&FuRta%RKCqNgT_{{UP*lh%ytM-x=H+F2!v>eQmv%rpJ3N`3HB0V%BB z_SEW@&u_$kSvT{!!++NDj-a2_B}ZcCZ}vA)=W99Y&|+TW*_HMXf=sOsb3aLn>7KML zv{F_dKp~%JCnZT%lhH;Vm2UL^0EY8?epBnAt!Dgl)5g*r7 zirP7R%pD*$jxuSwCeLmZZkg$5=-trh-HMmMx>9(qNv(d1=O!KGw0tILvOIs7BXA7m)>~wi7TXdX= zn_^~IdW-$9LGC4D(*}8B(pvNK2RbfaJ21MvYHDQO^1)h^wfn)o8~g-LnP*Fy!q$8l zU{2jgt5v{-9)OaMZ-ViHV@@X&Iq+=E1DSKPae3+0xT|XzPjKhy@qSm+e{gA)upc05~Y;sg|ek^cZ6XRJ5%)$N@5 z)0eH&uqLMwxu+WWk?;WyTl@)#Z$5u{#>IajyIA#O5)A!%`u&EV4M_0nmo`Q7EO$^m zKzdX;zjBr1L>4PwKIk2e+E<#=W@vM`+%AK;yxiuDtdbl8HpPbDCOg!0H}1P%>8+n) zLNyL8zrs*PeqH%3UlSiUgV?S?8);>1wr+X9C~#pdS}GYteOixG7NQmoX!-uX-0~iQ zCzSh>(baA2Yv6+Zl;5_w8gk7oFKV!_91u{3cw?A!Bcs9mJ(6-b7S+p^kFCh(PA{jJ zAGWX}4GJYdTIw?@peI|3p&?(R5=}qExuGY&p;o&7gW5+=Lb*-)w29Y43Wf?B6A?1l z{l%wpWn}6257e{%-yX3^9nXq6Sb2=a84j{XB6kNw4NeL~ zdwZJr>99)^8mj12N>2`qRZ;V+l@_XghozyR zuPj>FT@~&0Khk)W=gpi_Gt2+rx6mV z!D@U*#M!mZGx*?x(#b_$%tiI7?pyjwL-&IG36}T*7uU!RMNd^ 
zv5wA{YiD7>#)=Hkjf^=Zly~1V113(rfn*B)*eO-VIDO#bJcinDWH-?oYy}t zmWNOyvO2)1{<~w>zs%*uP>@JeZ^s@f3zbYD{AiKvQCnn-*c-GjXqC>VXV=KmV*aVM ztf5|QMwhPxe^dVe>F);*v3KPJF<4wff{=<%h^}`=svlgCx}KWH>Jm=SLT9DKRIY`B ze^;xS$-krdTyvjEItt&t)1>FBeM~x$rD{xXsws!+A)C43OFr|APZan9Cmi`!$I#~b z*Y_s%^VrKNX)0#zjhgC>_=(@oTRnWCAW7P`jNCGk-}-xvICm|G(#~&3vdo+kQYBvr z+GYe+9Vg-LIqn3O`+}CTfBiU>(P4Jwp43*gUTTSbUYY7@#BE&x_c@W(Kp411w1cwG%0DLT zbUI&@UOcw_nkum?ky%%+PFjVurbQkd)d>;GmJN1-*QC4!JT|$7KJ_VrktzqrG({g9 z?Mi4C{P`7sKkN%uO!!xv%EJ=H>dAhElKe}|i1fY|AfhY}CI%=xcjxENJ!6T+ers(5 zE4m+&b-yA$9k9xZqjxU8)l!yYf4>qR6P;f~sC^Tg0dxfQ0c#efR8dbiG1FI6_4*ql z9)K^gw23G`#4R3d2{GohRa3=B3MW`m&=e+7^*q!Ou#b&H8M$ zDP;yM{G;%QXXXbQ82R*bY>)1<^BpJCSGTj!!@*6WeDonM*lx@@&}_fyfNIqa{x;r* zafG72j^xwNBmsU3P6l^y#)qs%5{fZE?yM?dV<5XdL$x{=Xlsk(A!~{s$MYyi{k4TS-H!XX}&Q{E@I!$nuQV66F*`Z$G0Y zY%xL;$koI&LhqN6H#An)=bBL7C*vGt&h+y-AG^AfaU?Yh=kz&8J`pc-a;9{nHP49^ z7Ow`ZA4@5nFtbLhXp6WRP@^A^L=y1Bmn6< z=`nk0hwj+gD4wCGPB#6aVNlQqCp;;Jp>q!8-R+ufVlMgg^$4pV6CQQOI_&?*iMTPx(I$f@flk^&| z$}oQ*-`n}M)&X~=JkEjb8Z+_GbW<$#QZ;lz=XY5R#Co5l`l3p?cOo7o3odcJY5mRD z+gnbD=hs!Jh!Y$slO0Q)I?jvfwV;ehucaE~e`@%yXinaGkerjBS3WC{?ufg~>snHp zXTR8!#Ld3pYlPh%^Exw?g3RUr0*WqwE`X$~aBO8t=@jkLwUCeN zrP-US1*v6f8FFWx5qg8#O6RHSJ@vXjeM+Y(V?O;9{^N&}zv>nIUSyo%i?Jsm6gq1C zs=fGYrJX!-9XsKAnt`9GAF`19vD;QGt#A0ZjbExK^6E8s5vc0O>-iu^3rPLu_$ZRJ zbb@_S5*AY5W~*wM3ZsvAd+_dTSaSnG4fXAS*YJH0CT{cZzGuJ^+&b@QhDe^k)$QLz+P z!~yC$tCt{B`RVo2sNd@%Ts;0~Mu>E&H-h%up-rbw)Q8luqnNfa@Y*q`C3* zcO&xWJE5p|GsgDwt%K~%j2$O)x*mT~)d2Vd5Hqcqx8rP@HW}+wwXlFbGr9Pw#qnt7 zcK+}mHcLFaZ<~&VwT(9Kcd{Y1B7?9?px;Bf&k zXr4I7^>K0`M^^^E%8`cO_vxQ;RBM#ZS1-bAoS*W8VM?*TrQ7K; zd1sj2zP_Cz)5hg&S{C4EmUJyYT9lDyFbnqYfk}Oe2Ck4T(+@c>y(m$T)6gi;(vcd! 
z7dD31OBiGP*}ne(6Bi8{uh-&Lkqckho3A7NtXy;#K-)cYjSba3$@iJ=GvhNb%+7Eu zbB*7ZE}WYJo)1@>%av6!y(;=V<}2$JjZ~oLzJ_81#5`wTv*nl|VyXWC8RK(j{4(C3 zJ$3iLDyBTLkg4!}DxmygY`Jj>bBec&{W}{defLH8(pR7ZEclyY`?bkC~FT@Wz%e!DqXeBtIG_pTo<` z)LMEY)=xl_JjHX=&3Lf91 zVqd6kDWxT8iuL~Nl-iZKHaaDJaZ4A3vv8ut?P zeeE-nC7<2;@-DyX=n`sF%z}f_f0Ik>9!{!`uWIMdL_Nv+(APbE6mrTWc>QI#C$6eY zDJ7>)kgrOAHm!R{wJ(}*xs)i!>=8SZjxJqX^K-r=bmu* z`m?W6umS%7VYuc1`}fEIJ_pO;yYNdK_#IXf$H1_63j^mf{1@tf#vZ zZ3Q3G1sg=-6N#;t$v$KY{Yt(UCKX)peqKKQhe)~YXh&gq4F`0f07l-Ou&T+Y)KaSY*KO#-3l zV`$Ha&`t^n+QrV6Q?j6CMK!(bl@dHl@p0<@*!v;-E{!G)e7|Iv*>YMBS@u;m@tyO| zhA+*LEre9^zZ8qAyX>;Xp;FA%?KgC)Vq_{A?P9`d7puwgwx571e})?$j^EXDtMKB! z5vtbrUC6#$J6yL_(4xu|Qhw=tb{cABQ^@{!%({`7R5diM=rQQ?@&5pjW!k;T6)R>H zn>WW_)S|R{W1S=e@?aJ@<{%MH`N@|~biY^iXF6WJ6W>IEt64M|@tV!lIZqrvCYs5B z!6pI!06%-jL(AG;KLhNUC!fh?B3L-V5MEKKG8WVtU|zrcW8HO}slV!~>in0S+-+c` z*JnZ99?S~~$7*Tv>+7a86w*@8wCv?MvQMDMrIJHr{Vp|B{qMEuXtLK#$0Gsa)-!!e zXS3pS5i~h_rTqNAU7Y35&z5f2MUqgg>qct7#i|>tsRhr2P7BD|MR@&bSj-9lK@>-;@&?z-W5^A9WzCPKtsixge@E^_wfg6&daOw%H><(WtUi7x zXcJ!FEcjn%@|yAP=()Ac1dXjOakK`3*3>SIo`2c)J}27RN+4z!{5KjC$bVu=lBSX= zIv(L_M^Tu&n%A@1p!JK@Q0~-gHjN(dal7)kYleeLQyZjq{Kcox9=9k`qenO7t zbEm~WRo&o%wM0;f1|bZVX;4RG*VGD&mZi}2s$HW00JT}6OL#7Rg7{2(EwWl8=Po&3 zrpn62%0*Pv_AgR-_Ku$!N~2o`7rTvEIZ|+?F2Ad$=TiAObM22SIIqMEPA$rP9&r0xqw9@F)((22~ z{eqC^dwS3M-Aejto~xDD zeeYm~WX@cNUK-+7uf>K>YY)YSQje?&2>g$~x0m%FcgEma61z9?B=^$Iy2_M%ikWBGOQNSN&~MBmK9N z%vah+KV|3~xit{`J{|SaUwSApZqoEt1BE9nuND|$f*;rr9;dFYv39sg7H7aj7 zE$r1AQ4=Ux$oWokhRKH&Q+f$PQ~&~d)S6i3`Y+}5d6%u4(T;PWBRJfl$=a5YHh|S2 zhEiiBVz!ig0}<7g3SFE-=~p|WvUy&rV^xm&<>19JK6B;?%HRMU!OsB%nJ4vO%hQjA z#<`w-Os=m)h&}czBcinYz~f=02(`5dYvz_qh!4Xl2KVW7dC|cget!;e- z25$3MwdH`H7=?X~Y6f9ClsnOnKEIxL9*~m&#dxIu0C;?tsD4cRJ|{45<16LDzGpkL zpIXnUO#OOWj3Hv(K#hw4{yXv@Pu~LYSr_sBd&HOXjs6?pKR*lB79Yp|03)CMO#Vh+8eco|f3L{- zKaR@Yo3rx1;$WVU)${rD2X4#;7Uk2ChJY!HjHz6dJ2B>3OlsfFvwB@VmuIEYh7ZUL z;6r|=F!5`5LzYtIKD0@=-KuiWN(ItYCbcoN&>LPquf+Yi9VA}^iw~ULwrZv7dl(#| z-dx2m-q);D@t@YU4PVW6XD8V&2stVfqg1Lg1!^ 
z{7um>+kt&nqM8>h&N*398$%GTgV-C-6yeZ5Ogkk}#y{z6SS#^D^`ZLa@nm7~zZv`E zU1u!NlM`DLGX|JqjzYktW8t!`0KewjyTHUr&fXT@c$+71Ha~br{D}#qZXO zvzH1K%oS5CYUX{Tc>9)@ROPj7oh|BqH`eD{IzJyRgsL!4f?B(2qh1T$TMw5&TQKq4 zDqmYUdljuujjcI zDB3?9{EfWcS9^@OX3BR6;SnZWe-VP&N=0+k$lfJ2SpP7 zM!KDzi3|AS-8%4E{p@czFZ_q1rBV3zND6@E^Q8d0gD*R1mz`d@f^>8ueDmce*Fx6M zO@~8fE^qbE)U7vjzP_n{M#Hjc=jnS;m*b@~rM_x&M;k7D;GE2t{?XrFGA{oBSCkU@ zK6b9Ig!BAKB`vp@_{yR`F@%vY##5%k7Ut*oI}g|#9v9;2hr&3QRsB5LYilK)Eo5*@ zlsz1%Hqco6B|Uz?@1x=CH{ql5b*K|x1C@CpsQQv?G7fDgA1b4EVz--jfno>9ew{8d znX-dhjGeu7)JNCw>hCU$Ku*G=&qci|C-qkI*(pxs-~6qV70X2o)c(5Zs%fi(v`lF; zBp76$?1QiMoTKk@Jk0LMq>a?n5p&1D{=)t@m&=J2xfaZdITTOYMU2rEFAzGM zOXEBeTCJX@d$5Hw{-~3QA#=YUrh4Y<_4_N?`n?PxRPG3oHS*gmZimR%)CuVfWqiUa zIkn{y3|y{+oe8-!@#U9;FHzE%PM^pniiUPg$&cLWEl<;M!I$k=ABeY_zBgNz)lsx|F`~6n7}rft?cYz_ z#1rsIe_FYwJqiwTKX1xUmC-oU`3HEq-JbsdYId!I(@fZtzV*=UhN-!cAVtm!jO$1< zKs}=6tMrKf01@O*#r#Y?oM!eqQ%~sXd;I2iR8dmsSutqS{io|J-76YF&J>}*&HRIx z4v}(yALu?%fB6^Gk7(pcrJg^|kV|DtfCWoBxBmdLZikIoZ1gJ84^xN#0MZF{Z^1Lv zu-|04;Trdj8jW{N0=t;-S|BAYE;&}YCd%S%#8DX zK0>k9p$ z82Jt=lAeWb{YB3*6rfvOpeptrsCJIZAbaRvnkCW}s&JsgJ$2EIQ0r4U2e{WMZo4x_ z8Y8S?`n0yL@8ro!_QY_y%&ho|EyI7g2Q3&C7&(IS<*g}kM~+ZXHBg4HrOe?ok-|8* z7j)N0FP67ab&KP(rnoU3mh~rc%JbRl!uB834K$WB8Lv-XiTS^k)?l8={l0qls=Di< zn*#KG>C{U`x}4nUO{9&N!gJ7YAJkXm#%Zbx?RvWs^VWpBm1qt+X8Jkv%=Pm=x5u3? 
z6TV|cRn@4%?_!brMA4BwE7Q9J8B_lNjx6x1FDiFD{h905`u;pf^XL9g75pR}p~qC~q~Z7GqXwHY#_q`yO;p^~vSgD`ihci3HP*`u?<8>u6H8d^KzKEu*5X zR(zJ`7~q!pQTX@cDx$>7PLHd-f2-^o#q~`x7I7t?G$J-ROkjy88@Dr(i&oP0$c=-L+Kr}kOLcD&c?SboN zEwwUYO=&rkO6P7*@moa!Gch_U{F--3SK~jBwNBzhxuOQp`pdDjK5D<^xJV>6GcufQjH<5xdj^r3Q*+qu@Y|9&KiFo!aPh*LOx|^(hKc*_tQsqJ2nf z1lzMpQuwHqhRR0)!I}q{Uyy!CrQ?+9v>}}Vmi=0Bf3|d8?D6%xR{7`^XlWY$YX@SO zvrug$MT^P}ja$eAN8|;bcnV*)K3HDX8N0+lKk|A10Dz)(zu&n=eV>*GlGTN)1*^w> zh5l4POfUIAb2Gu^x{(pl3_qWM7JrZu@;@!o)A{)?llk)h0Fuw065lV-xwRnyrYZ)} zLRK2n--*_}aR8It&C!`(I>s~lgAae_^0^thJTz}Ns~Uv`Xx}4PE%}^_8Sc)0ToLuB z-6LjYJBoEF6f`?I9H;d4!#}cQs$U(%vGlcRSgXiHK0WLOs}gVa5BL)q`XrJ3e}%-3 zIi}V;Gk+rve{0QW%I7AQIV`@rN@*cT;|3L~jfgrebfW&h;_8B!*pixa_Wj;n00pP8 zMqK8IdsLrS>`a*22==`=s->KhWRZ4>wDh0E+xnAqCQ(dECq72yG)q46SgJf6gv0C_ zex$|?pZUGNufo6d;h3%Cn*RCh^y0e{5?88Asi(sWDY-Sa7E_riD1v@p z?N15kpB&}Pt!LB)y`&uk6Kpi!bZKpT=KO=to1R%lu=b))>2%4Cr2`#!JK&k%r5jt)L#V z@e1hYV4=M#-Ak9>qsUh?n3S`t58|HH^t&1q z>n)*0Y@KAQ>d&dfLVZ>yhLsa^PZzq_IbBFPKa?HZUs~s7yH`0~CS25Lv|-qJ$138K zSkdUlaLHM<0=ix@l2O(CK>5eqsV(0*r{MeXF|SRQHhMw!>?@S(b=xhG^Z(VxG=vZOJe+m92t;N zZ#}E$zS$AbmQi6dM%q+Jgu))^tA+=yAU&w#5wACZ2CUlhXT1=*F59O=CK6xUN=&ILNxNbX-Pptz#vNb0tyL^yu}X=WM%WvnL(#t}+@eFD!0N z)1r9)037dv02Uieqm%gWMf~5WqG?e6&nYhC=T4gELtw7MWJ%@G@1?*BTHK0#Qs(p| zY)hNj;00boSGX&qeNGtzHwI4t{onu$LG!+ut4IgqbA^m0_2kn;yA;1^bO4sdomt}X z49b8#*0IN{oqu7tqi=^gwSFHj51Sv>=TConqwH@(PuW-n&f(EaD<^9x$OSk({NGW>h;0B1{` z#^7@AOOK@iFXHRmEunTzD=9j8596iiqv%+gJ(Exr`AqbKN?2OHYS3cNWp#@ugbH_g zgE`ewxY~?K>sjGsP<^j}PuI}WUe)=`pGn_|cKeM&0Y$o8?u@g`$uGXA{{V0*(vuk@ zDe+=xXyekBT@&qJHnMZwegZ)UVGPkb+ z%HFaQ`2PSF6cMwb{{ZRM8!oQWX-yVmyx!o9-ALT*zQ~S^EkDd;c zfZvS!=yMpZZ3^A;N7nW8n3DRR-B@iemBsl@WA3+|3j!(THaChd2ca{UtbDdcM3KMz zHSG6wYS&>Yvk)_t!k&mYLByIhGKCtQ+@&Vs-0}8=a>fNgxi$(vc0Zz;SF(9B6h3SJ z047A&(It=MMOY_#4foFA=CugQg_lT+mM5(GB!a_wl+XK=ulkGe^>ZMJ9cM9rk6cMr zqX|+6IxeaYhWVWXMr|~u)Mq>(`Y!RE(v>N}yFvCNVOp zlJH3s{lPg`h(oH`8+&Xi6QGr z%81|jMACU=u@-=H8$7p-kUzFdA!_x*lG>yD0?hjK2a$@m7t7Y_(<=Ip=`UH}NyvQm 
zwESZ}KFRn1zXM2&3)r{nNo`QUJfSM4=bN?OUk&s7GAVyPXQ0Nhbp4~#@?&!|RB%x>5 z_2OM6F&I`9P;L#I@gK;O(9{og-&dzYw2px55noT7*wP=h@-v*j_kxpFAFNuB9TV!N zuil1rJJ;8(>E$DvI;I_8-wl7ebvizj8mvNpujS`53=+%#0BY+arTLd>j&Doy4F#>U z?^HhJ^zpM2hX6a<>r2T~fP1x(EvVhW=Q6kYl)L8|nVv-9muMWIw1eb?y?$B;ilg_T%NY+wv@i$Rv;*|ZcNw26U?gWxj-Rb; zR1bGMk-EhWA$J62tR9P>mKFOqr9f)i^LlpIH_! zT*41!CQrcB1U%gMx}V^JFw>KMNrJ+!1U&sq)65c9F@oPFc<;jZQO=P~IpomC&}{a- zzItS4&x~0;=i)m07n%Kx{;~Md+z|uwXRp9Xq2eq4BcJ?vmMx|}!p!%2%>Mw7v;Ie^ z`FG`x^Jfd8(?>U+!0?xaKQWYLKoem614C7b5is8jx6UK(DfYLa_S3^+T7W0&)h&)L%q!CJ*dNe5Lu~p z6onBclgGttT!k?GV%vV;{z>+}sBwixGsjdeRyVW`U8Owh!<^8Gh%9inh9DAa9O!=+5*cw1Jj7@t`L`}4A%W?=Ir;nC2Jr13eUpwwEXZ${g? zwAUcf;q{t`&|Lh-sreq`buxQemSA)L04%n<`maS9?>{+x4oiA`O}$BcH{ z`;z|vaZ8Pgm!|lf&nbF+sondX$4b9((q%i*mM)Wlf2&o^==49`iark#U)&QyY47$Y zaJ!`sx>gcWb@W$u?0Y}ju0DyiA1zg7)3IM-3%UOQy6ZZhDqFhbiW@ysF_UdKXFunf zh;l)DzU>9=#Ku1IWPg8{mCz~;3=GBQBn6xHiyyLznV8aV$TQcD^(7zH2a=)YvGIT! zqI{|VeT(&;6Y22<4JMuN4Dd+_grr+L8AFx2m|@;@mMlMxL^YwkQ7jL-_tbVjpC~UK zE5b=*H)=hhyHC|ECUf*T!}!-no5!Q0O+Rob`>jRr^&?YOOF6ymg&h^84x{I?;9`0Z zgBzc@et-ESP45$SYN6=#R$VpF1e{EfbC+J->2Nc1nz#WXX%*@*>m=(J_{SAH_^08Y zj>V)-@p5ywet3AH+?}plJdx1FwGi}Hy5LGS?y~)zIl0CE0OdCho|gUJS39)J$2oRJ zln}~=Zq0DbSHfGHEhv$~g;7w>;qkFa?P4T}f}|tDaRO9av&yJk4J4>n84A&!A7Ss+ylJ_{w*KXO?~o2kd`uS8p|uM#)+~5DhA?H7or0y> zs&EeqXc;{D%Q{A%8&>8CdX`u}O;voe<+<_FhTGufenn(xKL*>f`Yc6@=>4znk@g^Z zCBDdQe5yF7yY5drUou%d-;i*8Zs^Fk`mnM&b^OA!-R)l_B@rJ5r5++^Et*M8f^Ptt zN@WMf93tHB{ywG>x-+X~=6t);H`V&D6J^menwn&^{R6bg1L}#s<_z6l-{kxbXKH$N ztmJ#uC*7ud7FcIo-k16mUZ8Y2_nH1l{{ZWNdf7+evGFXSpOOA66_WN7&38eg__*`_ zur(AoDWX5zF|+jl0B|`~{;W~XG`+&F6@6N>q1A`06yC}yqS_fsN5+XQB4L{T%8Y`w zzy9*4H~#=GYX?!lo3YrF@$N-b59ECAPhYH_Sxt=q=%swr-BFi7VZ&UWIWJJ z`XNqk3H*J*EL%XaCM-6|`(JBiUXOE|QXnU$ezY+XmBoKUu$o`{mWy?9@#*@=M96c~ zN3nRJS@C}=kDmnHiJvDUla3FB(WFK%PukK4&#h8E@2A48s;T~S+C~Aq&(1$9tSsqQ z$%uKJnV0nD{C-OzpQ2p&y`@f9qCVW}^x11Z&0vfLP7D~P&&JQ4Vja_+>(lCN3I+h9 z1W0Fpg!-d8h|_~1-D7F%COu-C}`r$GR-o?z6(nUf5N zlLm(tGs`m$vbpHeL(G`F{#26Tlqu_At}9{1p$*{{Vv(t2e~|0M7h}O6FTz 
zD&cXgtLN-rO5H@0*IZ}nx%Eocy7~El=SBYKSrM{WSu^@C#z^aa7U;~Ks}uGA0A~~N z55$jG`AK34j1Q=|Rz`p2_#qM6UV@2HXXrG`oBqsp3GxvBIatooj581Qjt zPLb~`kJy*;n*P7+5_7_<&E-`B+T*kdK_|ka3??#CT(acfU^Ig)FV`=f`61E!6gBfY zt?PXh^{wM{Bf4&e!auZGU&Lzq{{Ub2CJ-#ueL*{oyeVUSMl<4#9!l^vilkf0koB$GvOON0j)*=J2GcGT-nBz&)G%F;r5e-3vG$x z;YbQM8g(+ZdFN}p!AMx~qmpZb40YRe|;h*OjETPhUPIii!qSN zEc^2PqC>(xVTS&x>BGiXYIDRjwS~DFz4)P4;%O*e7WpoDOg2-a3|qhbr|WsV1aY|= z@Lz3~Py4BJ6N9_v+UZxIcf@`cKem}UzYEMD(l6G2Q~5=mKg&B*Ca5_67j_xkjw= z@xna*OD`W#j(K4J0G;XeI>+ZfHGb9Pg2&fM8&^DVp>8D+bZo5AD%HE|`*bN<(hbwk z$$Ij3M~u#lp8lr~nLMtUFi4Ffl~lcdY1FPk)%2=G2+oj+qte*DGXA$Zt@{}@3|jnz zLp+tA0KyQ_-&wTR{zr2KKxO>CM+&Nc9rUNG6cklI^ew9`KyJ|&*KQcUZ9w=T9vhiw z)OoM{Z}5E!Iw42yHI>T*`8O%Xx|(vj!44I9ychS#J)?_#1WTMW$2a@G+NPEub3ed} z(dzZ+{{TPv_z)M=(^sj?z#kum8Rz5GNItt`scXOh{zWM)o&Aj8FORMDgghC7LaG?z z&BG$cM}iwX{){GzRORZmUIr^Y`DQ5JxXhbOMqmwDdvSQ(;&O>S4pCG4hvYpCr=ImL znl5H>q1p0@mz4Z)`NH0FXF3)3zV!b9DLNS8;=l3^PdQxJUR@cN=vMOBg}~;cZYA_% zARw7Y8Gg-`zQWl(z}LT_KH2{Osh^b1`yU1i_|xT0dz&(8Od&I!(daHk4?Y&;($QbP z)i^HIBI;K*x9tAq_qXqUJbe4L91e~JAGI_Ht6Q_QR%AxWL9#+X_>P{f=2VH zul;l@+aqJ6`94G_wqxk@sQ^!^CWLyre!V#+jSqUsQ?Da_{{U|L3-fMx^0l%4WA+ez z&ZDwuKO2Icar3DNP>By+LZ&rmK3`zbJsv$1U|T@N@O- z?elg0X|+#N{{Y-%m~(dnjjr|UQNs!gAWQ_1T6mUg)=TlzMEPYdE=_x6aF&qLvS zxk?IZ3!t$S&|ZMmx3$Lnmab3a&uK`WXJ_jd8uvF(`2DVYimD1*7mmN}$nICS5VP1t z>nBpazm^X%O;UjiCH!kKQQ}tA@AZ6dnzguPpuw4U0W-$R>1~Lk~0HC&3fe zU|0PvFHG9x{DvXA$^EQqoRF)J_>AhPxe>6fzQE|KnO{=DlIi&J(6zBuQW}Wpvd`*B z`(+6&^7Hoocz>32xr6zQU7oI&%`nn2+0&z6S38%>rIa}|$klXK`cUd!k&|F!a}fUk zJ9l&LZ3&SAKDPsvko&9Ogs|8LwCflAW@B^ujXJ!*@cnoC-;GGtak>8h-CQ)R*ZV5k zNT8d~GGRBN9H9RIvcY`$l zzNQzU_y<6?Z_g)8ksand&gDCTv1MMbo>xJv--M-vGnY1sTfZ(d{CY) z2H6K=m(KqHU8!X-`0?vyE{VM^R8ttbXw^86I;$VgPJF2>pIP2Pvzp6oqUvIl+&uhc ztZ)(q0gCeUuYQO%O;YYCm0dd8*Mz>W;j{8(o2Xp7240&k2W&I+`7Qbwa3i1RJDJa8 z8KFawsP-x94uOul^~wBK^AF5_Ag_$h5O!4VCl(HGr>h-wHDmQEZpjmKW)qB=2bsGsS3?j;^gcmMyKw`TOBidBG4=^Ip+L}wSQCbNLlu! 
znydN~@c9v6UCNuL9?DrH=B{MaD+yuc4LC^#C_c4rS10jn#fp%ml^s>H!<>oK^3S9R zuj`ew{{Xa>i_=GqBHy!(tfz1{g1(L47W^~H5_*Y)o<@1m1&Wv8s+H(C$*R%Z^UWQ# z6wN>2AN1tOF6ZO_0FCqMqPI-&4vqNw1O32zeXq$9jX<83RD_aaDoJrhq<%yAy({5x zY3lJVkZOq*C!khdmJ$RPS4G*8RIhYZ8sqy4LxIa`TA{{%M;l`4=SE&dA4?UiWh)*ZcPjhtYHU8e9a2iU-oTwTR;qr+F>B|!R4-_I z0lK8?n9pzX-TafY%oT?Un|ggX0+$E%emq#PN?TJ;`?6tMwbZ!t>`Vbc?y%24RrC?( zN3+)>n0P5IDLi!M^YGRWmx^mVaF4Gvnl0S)e11n&WQ>{oR=b zo5OPfBtE?-czVRYT==-tTgH>0BURFxb@OwZbTFnFV6&Hd*m8+M`1f;*`5rs7`9J0E{9$_`POf0{2b+IGERibT+*kWtN>CbW zC)m>H09eG|tWWAuHl#aq*zT7G`zcIUcK-k(dc73H#(K^7e(;Yjdy~0h#V0X*FJ(n4 zxAFd^a#!Ex)$Wg*1-9F-a_y zKK207LC<3olDR9ezOj9Oj3=Rs_IwE#cex3*VO6OFXZutC01vZDR>ioPq?!KI4x(K$ zF*OYYK-Ewp+<%2vIWzNmY$z;IX4&6ez#Pn$#37p5ZJnw^&3Z!t56kwA2KoZe%bltk z`m}i0DycNHdQGRT{j;Sc>HSUG*MY0>1q9$fMXhZQ82z)T3ddO%QVy z7?Qv>i#*sutTt(Q{-S|jjr6V_?L6XtXI3E(C)HwDv^LrDm9MQ~`E#dBeQGt({{V+O z{{RD_;aT0WjhpdEe2da#|LyVb`O2Y&!Di-JZ{iY*RDPr@lq4hz#}Tr~A3aW{+-pPLzvsh8X5(h8;WLeknqO()Q+iem!H8i!=NVdsV9?M7n8Y424hV z*s0D}-Op9?(1gw6@vdaQq{#O{o-}$_&%tWY*N9CgL$CHaT+7;g&)zx8O1eDy4W8|1 zLN|HyD#}v2T{U?GAMO0ccT7Z;296*=38( zThoqW(~ZWdqrWIzji;eZKlHyx_eJu?{pya{6zbX5__`&~={No!xUe7edAWbLeW4k- zewg~uzRtX*ERiYs>~s{9X1M&r)Wo}0{eHdF+3a(grK!HJJNaUvSalMS|U6jbV1DY&DEq zIJXh+sI_-dKd6KOR8rp&Bw!Jb_*_iq^YQ-xkxkN!YdWWEVg`J+v!YQ4U*(df#QlW_ z*TycH<~?pKmd>X|t*$P2NS0ib`sFUga&9*{x4G`po)@tU=e;2BGb(?#4Q8q9kMcK7 z&WoIW5LwK@6LPVRErgA02lM_ybd?<%3Hr2=iszw$Ki>U*KAv3t9?g7?pW3IIx$h`x zklH#j?gX@YkNp{ET{}1aDk;$xZ?jE?Gwv&`C~zVCbC$L9?+QD7{{Txlxh1I#=QbI? zaJr+qC1kKf>K!u+`rY2;)A)GInUy>+a@787C@bbg3*}LF9GaHP%g%Qm!`3whzWQ zyo9HlBf1rLeP%b%;u8c7@o ze|opB`Tm+WjUF?rLpfp4o{HkH+gTI;0M$v;>o$6Z=W{v#0FB07W?9VRDmd*4IM20? 
z_1KPeA-OzYv_jatqxsi2>Z&!Cm+BP#PEtW=gc^Jw0+|YOrfyF57O&zfKO~0b zb#F)3uF82db0TGPD8&-HYH2&A)M$vdt5y>8_W`m-4ha%RM7Hy>{MY{gVb1S0n3$07 zkGDMzQ32rZl+9VB_xQ6jKf^h})X0B$ z%*sDhMZmfRc51GK)XG!u-hVw7+p-ybSIUT}r7Vuj18$lpVB3;>RSiHKWd2<`yZxW5 z*y+U=765WGsN~7>Y`Q-x4nShuxp~MOlW_VOQRa}T$6$v{za{(~yx%UqBWZVXZ^>M# zYGtVs zKP>D&lh6rbGOH2q%547 z!Tsr@Tr#TxY~>~{VvV~yRm*KHbLCJn`dN;#(!pocsgb+)dcK>zp2GADT_$~vYzpV} z7SLOAX+)QJLDEij4Nv_iz4+%}k1g!rTE521JGvwjPc%PUE}A1GDvQOmMD4_-wyTpH zIiiZFdupGt((Q-ky)fnj{(^8*1n?((JxiP!Uh>*6$34KF+FL~j&~l2YJ&GEg{{W}l zpT~ST^ig(Ye$K`6yAVO|QWauZ9V-APsT4hgLa*RW_{o_UU-BByzxkSysv6Vg^i;lF zQ@ZLT_C9FW$?<>B%)IY+(NguArR}9+A72a;4?&$yT&?WcWK~}8-5wU9VHTfHlNg^q zXLBfTj+fN3u}uyS2Tq*)kGTH;-8p=}9-D@(ovxy3FO`o|yz8Jwo0E+AP=`nl{6?28 zmFs_Bl|Z3N>{-3*} zBR?PPeTp4dGDDK?yx~g~S2L{kx%xt#TGn#M^+CyGuV`ED7Nha6{XdU!eJVLtT5EQ* z_QI22ILr1G@MzX1o^>ZHunFAZncBaCumswgI;;G8&QU8V=gD->yR|)1u$eIinSjab z&}CWR=gX$QN?^YRl;8M*T($l!!nQug8txS}^!wn$Ufl`q9*82Yd*`)lKm!sQ+?{-equ`GL&*=JUTg{Y3Vqowuy zz22`c5Yr*|P4hB~V;^A3=RTive(Isn`|Oif&XfIaYzF=I`o*T*riLh0n(Eu&!vPPK zp;JffJX4zU=yQ2X!WE@mj+;4+I>VZMI_i|*ujJ#^OHIv=faortGN+t!pbd9WANqgz z_bc)a)eFvl?s~3;VP^AW=~4ub+i~OieSI!7f z=*68M`}gZA7k@5_?WCAS44I(A^m#Rl@eR+&8Q0==CCU0#IN|AsyRLK6%6^G$340q& zroZbxj@W9T{{XyR#Z#?!u~H|Nks1EWF7T!GJJOaIf(KS8HK4P+8sLvg!*VW)78vczXe6jZb4r8x9?#^B;@^|>TB{U}oAwp+ zm1cX|Z0J_1-O%PoMF!bXCx(DA{{UzvP|?sw7@5-Kw=JR{vnY_ybxm?pWBu9-c0BZC-N$~#w zr#mD(UOU_jYIv@qd#)*&w8*RKtZ&`*Ua4FwnNW@~LT$!s)NAT*Q@Y6X4hboLD#PO^ zId!shbC7;DW{xt;Uw=CF>UCi;)TdJBu!s*rd4zshPAt19 zM6zl{t_td6&}}yFxKhuO6v?A2)>a7oYVv=6=8Ur`eNWAqbu!ycl*84YpzO4EE@iYs z0Lu__+H3m|Vy#J`8i2Q&Y~&HEIipNMNI zMB(-3IvSbS{9vOoUd~5%a)=`6airtcudy=e%9WXGTOXU$0I!;~v(V0 zxMucXRg!rO#_MmC1N(`GsDnw90$o~tlYfF&?kaLAmTdGS(<1y9a7|$g0#}s#K#7m~ zoAL;Fn0Y%tCHxnw%woB``S{zM>KW%p^Uz+_bnx0oz?VGGrt@HxbPd~fUVwTWX z*A)58-@hW*+?rjp)jruAJtaDfu9Qc>53?6}-5Hp^M7;WW-F2n0N~d%8Yw1x3s*mnD z-E%RBV!EBnp7#tLH%l=kOO|C(FG@g)E^xUj1~;GXc2Ik@(f+$mzKedR+SwsS2+u>8 zuWEfINv*_gjD?Meuyg<&>vs29D&$Zqvk?ZESsm!ie 
z-0xpKO!rIv!re&zDGx`J#zLj!t#bs&`;SdM2TC=?_rG&;ps^=A5v%9@u7|GvilE(N z+Rx6Gwk2Ngtm^Eermx&N-;VPpGv0L*#mf59bb7*lPn4Z=(G=Z8D7|lre``DE$2TvD zQu*V?%vCEiwk`Ks4TXWW^v;I3(F`*CzN&*9sxDOt=t0wz#m(>iSVuhkhn%`h0ncL^ zg&P##N74#>maluJq-^t_$IR*mXT#Lb zpVHf?ZqZ${qQR)i)*DP#zZ!QE`N>LGzKY0+5>B1jm}ZhTC%S8g$QEk!jV{7#BUSMW zx?0~C09?hBst4ndOw`1Mq2B3_tS z#S8|nyBAaXlTV%^)lxMl3nl&y#(iNexx~bW_*$Z4^ge&92uFWiPNFvN*=!WVrTmg! zF$xg}krw;)M_7X^AwOq@H8XWa<8Pe3Nqsxrp_;RxdV|rcmN7lptEa)|7t1xW%G1Wr zDyUwraRx-%H}xlHXZ(AhbI!Mm(z82f{=X7cl}MD_DlFGGV8zY}qn)`<4)i8lxZP&P zE8e_>c1BrdmLN6I4*|n z&uD3jlF6o>m3<_0W2wlRzq8|)Uyss2BbdhD0e{qf%6V({!^pK<(`7)JvT>`Cy@qrp zfaPTx>MMYSazaRzji3;}Z{eoy>=o7dExmZEL~$<*p*QW+a{h&FV_PbJySENKYndM9 zkheh(ljfdhJbEO^Am=^&_C#Gipiagnf(dNf6_HCY>^$kwR6jyx75@MmmVI2n>+}nr z-rq^`yOYyY!l0%uk8&zk!RoZvx-Wpbq_}K^Qo}mKiZL#C1JY0gJg&!MwbH`g&pq<= zXD&g*GFyMx@#hnroT;yj{+uz_ZM0U!bApXi)hs>!5AANx%zErsH>E!@X~2esmI8{&>1M1)D)7z5aK1wSPbIS$`mP1Hy#5Pi-8>o_Ggw!e6_WNu6 zYn9Ngr&=xj)d=ns%JvaY8Ci0jlp`k@jfJcK00zSF1n8K4x*wA9C+Nn7%WX7%KP_~> z@!qD(Ba9QgngXY-wSOQP;&Ox*MnFS~M|1{{Xjp?ql#IQ7I~=;P=vX zofAxv>`cO?Q}a5co>150_o)0}iAT))t%hYS)aTtL(Le(kNl4#k*de9=0IRElbo4rZ zjiaUcHwW36VKob!=uW z7b*QCsTy4xYagv)cJH6C74-pt{{RMD9aR6Zm*c#Qh|xU(HEDuV-4r%*zA?K=Miatp5PP76}>u00;GYm~LM_rhO#UPwqr1 z3dy^urMRLmv+RA0phqY?-wKS|$D`JkXo+U~x-=ywCz8b{ zyEotQ8!RMAPs*Bk`u)P@F22C}7va$P%Ts~ACg95cSP1lrlPUiI+KoXk*algH1Nu*& z*rI2uYP`t(G*W;!A}>>ljaf|Pk^Bw`zc(J7Y5uI)7;Wz{Hhzx>h!1+UUWZgJ^r7N@ef;_N_tNCA{8lR znoa}wbJ8}Od9Y!bGRbr-3kgIu{x3+osn5EZ z8Nz5?J{#t3ZD%#qj>+d1F)Gy62%{Fep{+39zHZD|uJ3Kxe(d2i3SySICPI{tWURy^o??WfFNOIz>q&dT`|PF$c`(=YDY=e}9kx<+?$i0zm_;#Zf|a~x{|!8}e) zrbS2Njco1CH%Fv|jJxbXHFMG8dTxyxZdsR$MCHL-1d;)uvEAX-&)qI5$sPV3U#sRV zqIWlN*rcILrz5Cpp|`!?w=-UA^h}A6e};9taLJ#Kwpabw!F17VZ?NPex~5emI&F$? 
zTI7LgB;Y0NCRWE!pI*YoxRn`Jz} zRs1!R`Ri)jUj)F|=)datk}A$;?`w5(o}9>TVwnL$>jtZ<)$Y<({yYs?!k#NN2dP-A-b%Ww2I0v%tYGUIrK^ z#4y)|%w&+iKPE$g9ZE$i3A!=j32F;x7JZs3hLB9MIqE@bgrak+nMhm-==S;@hii28 zaX#|JOeDV@^n7QMGvoXKd;MSpN&)rm;eLMSP~MF{ir?Hs(ZKd{R6Ln4x_a!9*w(2Z zKtWb)ewB&Mj6l`)9tHZ|m;RxjI$LDr!I6GH%;w!nWUO_w5Of%o+3&NR7pAA~)S$|) z3I71sK5oVzkN$1bXD8FjyFVXCWk*xbe;V!AHC*b*QiG%K$++PyJ}UC~Vm<3l0km8?R09Pp1@rpz zEjsp_O=uF_3ZYw0@+ixrBD2ysy{{UWvv;P3G92Ci+T$a^L%(GqaO*3EFPnEdB zCO@MmCKYo0XGEFH=S&?xx$*KU`Aa8mT|B2nHVdr=8V0S_Kiz`zqoVJ1YHcI*ZmE-s z)T=4_zC+O6CvhuM7=tq`ttz`7iRc)kNvp+0R99Kgw5UG z3=H_({dRYg$DTf6_w49}RPXZymb2@~Un&>BK};vCvp3L8$^6!g`DFZra|5K+jY3@X zI)IMs18o3ipqcD)rwqer5`>QyO5MO`$?^IfUuQCJwacN3wFk zPHj(4hUb6rzF*eoPiOa!IH`MN^|o||WNzC}_jmNqIs@) zRDoL@l)w5?3ulEm#0w6uQeW;j;bPR&S#@}5>AxqBSra=kGPLxWAM~&Z#2>0}?k}IRtCz;FrY?;;JLBtnIbBXiW{~wGAVpK6mWS9!6O@0D z^11GYnw!Yz{z~f2c~j8nmYLLjsMeI%tW>YHX=NIRZMc+cE6O_4)mImx`E}h1B!T^= zbTCx9W_=xh$nT)%lm7s1QNKOxUB7PIW~zUyvgtMJtD{dw`>llvlG+g@eO`B8(8;rT zQF^4E0_^fC*o<bp`nzKuo>Q*0@`*t)2o7)u;)`o(uUkg9(W)-kjz zsp$R9jQX~m+ad|4C!o3%ymRafmSX1`+&;n~)l&~r&dsOG=~mi!O335-)+Ij5tLta& zM0OvKkeg6`ZJ+TmGZsQQ}ZH`El6P5 zDXHIK33u#$A?(`CBbA)bC2=QOwcfbsO-M5EuUu_p5S&Vnp+lS%rw|_gmr{Sl`W1D5 zZ*;Pu{<^t5`d+s+PU_=J>@sQRANF%iSW)Yg?^5drPX7SK;el$at-R^#bu{vFX}R&5 zYANSb?RjRp;F@(a)Sn1c!S$`Q;DIUg&V3D@SHRX?@`U}2!%emvvRt!M)xXCULVKjY z@#nsxyMy`T{{SHAkSsm?jJv12rE_`OpQt_(P?$Piw&OjZpT4=$O7_h$C7&|>6Zn4$ z=64Nh+*=iYGAWq<0I%}ylloGyspPE-p^Kx8O_3@V>u6SbyuU8|Yrfd0?Q_;rtJG|y zy(n{&LB*uWjDqRk&wTBFuAqp%=hyj5KoX!ovrCnt$x_j2FI($3F#iC_X*<|s?R#^~ z>7dnE<$ z(ji~bxrQ^nXTH1-j*cAUZN?rZ!awZJb{jt!?sMRooqT^YiEB9YrJD|PeDm4VDTtI! 
zs)C7`HCg+Lx1bt~ROLo*<>ZMfhn>%2nMz|Fou2M|O0M3G{QO6=psIQ~U0Hh6{qTI3 z{G4HgJ*vMFho&6Uh(=7VK1o;9-Si=(40PksIaH6Z=%k&FaWGdisj=GMe>b=Ji|Fq4 zduWTk`ZoTLE@@7gJfK8y@|nwuK#m$kTkDiXl@)H?(PjwrM7;T%)LZ8}S=HPzZOCk` z^fRh<3aM(Ed6=bVpR6>qeF-q-ej(fCm3?X>uDN@vnGl=NA#N1+Kacxee)*&&h?#!l zO+E}`+8SsEZ&UHx7LMEx<>aWRpxYG0<>Jknj#o|0ZjPcEPy?zRQkB`IDdA?l# z0KwJt_^7v;)=yH3D}1s=D&_^HtLjj5jUj7Vsg@4=L;Vg#x-1f*e}qZI{kEH#!nqyg z+3(fKFFU1IGx6eh%EdHndKb_oc$l=Lo>DRq8T|g|dlDkJJzXAzTKZJ<%C9uo*|A|r zJaK;yAsRC@pIZ+BtSR@I&YC15u63+G$a-!v1^)mhO^Jt;fbyoz%~PW?sf%&7A+kGu zRAW~Ik=$Ypm5M3brsU3~{g31IT75Bnb(?mPtEb?_i0Mf`tQN0Czx6!q$?14Bu2)EsvN;9N99=1^NE~Ye9Op{2~B>D2o=>c7$|y- z6zl9Fr5&z=I;pA-O=~f~wB$riscBAC_2{jt-kf~X#DlscIsjfq8lzA5k@W58Nusge z+vgSwiu*)7(=4b-5wqN49_HuuB|k267NFA}g=7TopI2jBpMUT_t?#2f!MU06%X(5d zucOCSLC<(#wD-b~+$Tf@#_vv<_{)`RW^<^~S-YDw?4Q+2>X+<4m7<*4Ayt#G1#qc# zN6MY_rT1Swec7H~OXLSR6`O05; zHD;V&L7p2~V=f^SB5EWD7z&BS{cHRap4~drS@eG(x5E)DR=;mYjl-YU>SZVeRF1W5 zs|f!9bjX0b{{U}7B0(){p)d8cx4~pgO7~0Go4@6HMgB~c^5FIA{CCese$cS@T{{SK zOFc8IQJ!T$h1BxQl+x$~rLKVfM4$D$JptAdqmzQIm?dMPQPU084U#d?@imb{oK zN(z#r`gPg*pI^{^i~e_a9Zfl|J=LD>F@@wT^{e`fK3VZ93w%VgDf?i&_J1<+{yqYX z*DvSG*XqQtK2KH(`1tU`(aAu6R!e-{44nDvnXbzICSiWPZe4)O z5n|*}lMPdyNfP@X23QDH+G6d%^D_h7@Xh`MUony`4JWbDJsE|J_l|rb!3@>|{hlEc zsr3zu^~ol*ru{LWM-h^%pZjCo{&eM{Y2VUvsvlxn8DL`%#K^mT^WDk4R%sZMInL&o7u9x$L^5_P+l>8B$jB4;$5W!Vem~e}8#RvV#K!vI z^Pc(JM=hw6rC3)|J0A>3Kwk5S5>5#GiG5a3%U?8HgGAhdQQK8!rODaT22UL8Uz({U z{qXU25iQ>_3q)H{Vm~okc+=kn9cUJ8&YVwp5Y3FZ7^pKj$L*2IYL(h%AJ`?&81*IU zax(6_y0#4wdwolzT+er#?E!YBAZLtIQ!2rrbQ!g3n7vD=k(Yn9M}*07WW4HKv|_5< zuN$=uI6k#6xofTiCrEAgqKo9*ViwI9zLpC?IRD`7bw23_m*6fBt^*bf|j zxQxVhShjjhD3-V}hDSa`L)zz^`jE`4D*mi2hSF6>@;d7``yL=9N6&-v> zU_P$TQzQA7mf$KgGDNYVS%BtB-kZSf*0<9yNA0c&wrM&R6rc>3rP-Z0&RzN?zST{5 zE_ckC_htJZZREpDx7R-Y>%DPbs2w?T78-V4O8K*Z>DNPrnMtwg(zDN=M>>Z z4y-&B{tw3nYd+-lvbH(BCQoPCO3FtdogW&|0{aW4$?GwN{7ERq0}2jzYblw2Ufb=C zmvldHKcL#Owlmf)UL2dBYbsf~ZCY(^jH}O{X!224CGcHEpI)I#lE*xKMoz_5RwK_$ z2D(dpmYHYqT?oaP5U0voW_bDfR!KCzR)&V2EWd%Rp`ZO<7cBn(?J?XNc@7=>g#l)A 
z)ZYj?1ccK-)jR}`1h{Jc%Op>;4Sx+8*(A(Nyso<mj=%;D-6oEPJ=W1mMW&}7xe46GtTck-=ugh1T!wUtcgRA`_E<=kg zxet8)3mqvGdQ00~t5oxs%*BO-G%f7$Bly?OpKlA7#@>7TlC=6QpYyR9vtkAzl z@^|DE8rjwV085NCc$y`ONMYT7L7P$Al)6;U8t|>kQlqdO3vC})>U8-OE&Wf(S#RxN z&&a7Hw4_=4`5-Grod?axqC^Qh>MV+e(OiV5tP6ij#wiIks1|d`&t%Hb5FZ(j)jyo$ zxr;#x6`nsI!GHGVHj;63y&w=X!}>?bW2T)T4-uox418Je&7EuC5>_wck#z>HgL#E) z!2bY#eb|8hn>1D83t|?$^nGW1vEo^vELxZA&&eq3F+T`Ibjd*|R=$i?k$pCJdlif6 zLgl6#KqWw#pj-VJay`P|BgKaucg=}UcF z^y6I{OP_M@01vel`u_m2AJ4M{*^G^gu?&*s;i-sjvxhW&n?EUyBYxo zaifvVs_6_P}A79rC`ETb26{_~1NYz@>*U~~pBIYMo ziyRSd=~3e!VTliSSHeHT`G@sePh+5u8c|hKI|~Q#=xNycy*GN#EH%Z>{;}yZb1G#e zcWJghzQ1AVb4thb5n}FZ=+W9eEZrg&d=mATr?5rmxuR^Pco~GRRsrDu0F3^>HZ$Ow z-6oOefG0C^SF0=!)1v7A0OEB6Oysl0GCfX^i&5-n!!vcN^Zq2S;7cI}7xmtrhWjmd zLXh_?*cxyz)G=!JJa_Aw{{XyKyZtpCDlGC}`DfYr`UaK_U(5JvJU_Qf=YP&({QiD_ z=RfECeu#T+*Yw41=f=UMtZkiO@|u<=N1F}9UBO~p8!mW+Y^^w-IwuZSAG!GFTAqBW z`f>I-=vkm7Mtn_|oll)h@K3D*?031vgX1fUb*Dcw_LuWH{{a4Tk&JY&d30x86HJ!u z-u#?>?{_<+>8P78ooMpMXOMFWj!+M&v|ufZiPY!LN0j?0LUrdb^b$BLGk%gLg~Gwl zB(eP7cQMFfm(sbXPX7R`^|v;ppRiPoo5Fmi6KdT%rA%r-j+d1H_&^80AE-P30Gg<3 z;`QrNc_etsQO0?#KH4ac-0hJnFG4`k_@mv-Y$8l>8n!k@)8jm(fRZZb^R2 zZ(9_=mNitAl2H=bX6E#Oety}upYaL}y$*LA)pVqoW3#slz0SLySq{^%*;|V|DW|R2 zX1H7IX|J8DUZ^m=JOE?j{iBS)?s8;~YGf+c$Mcrix~^^1+36f4%8OFV$kz7I`GpP^i->2SQ-oF0!l%?7VL{{We!oM%g>WnO~#1}{hEUmwz{ z!6B{}?5|!4hO}qh6IQH<*dAIw!6NzAe+|a!{{Ze8KO$s^hS1!hceQHQ1~FmiLS4AZ z*vD$rDnPl0S|Ggn(bDPN#fSF)0LHumg&B)+ezfyL?Xr)bl9h~L+tBrA!$>sP%oh~^ zKa+x{=_82(c3`XPU(AE1E|h-ZGofmrRw^o8lU9$kplnnV>6*E`%k>(16Dphf9QsBo zawy2TG*Y(?wsG;+1^)mGMG_Gj=th#Pj2-huNoNQ0Bhlvm<(E@xS-3VLbg8Uudpz6e zHc{bmCKH`hcrQtaNVHpDCy~F>{{T_G!>*eD0B{UJbQfzyq}@ReIHN#LgCq$jHGfcA z$ss!dyzAjFfj)IKoGm9Sf669CB9o@DnX1|PCixpa{{Y?f)al%L_@7Qy*BAlS7c;ou zWn9>50$U{x-yyTpQVHtWd>(enz*L~Am5W9uNyG9E2jhH|x@MmfjV;Jw(>+=B&brxS z-D_Z}085k`9EBlGQ)SDY~(3FQ5?fmo>mM{%vbuZ6(d+ z$;6efZ$-fFf;zMq)iY&WT}o+#qRP5b=(iavtB-f+O1~|oZ09>V&!lLs3R0eg+tC_Z ze6xX<*^TskSoU_7ESiI9uIj6%^}WP}g%j6B 
zNY$g}iKKRqHQS1r$r?Y)a}b9sH}25Yv&ZM>QSM^#t1al}Z_S-%1w`euzjkUo{{U4((1uRs^!{TojNZCCT(8HskWkpZ)sf7x ztVDuU*1%C<~52yL-f1<0Buh@ zaX$Mg2^a_AI1?I7ZY&<@ymQNZ*W`J>;79v2YlHU3TO`f=gSO7iWX3e&u=+@Z-M>jJ zpRQ9+H13UGKV13pI(6zceOK+X@wz=afJeoCERJ`nnUt5T#dOoEEvJ9`9(ss=R>gZL z9p?T_NSA{MRlC);%Pm3 z?xhMz>8U>1Rl3~<5erWal=P(_N0@Kg)04$ozY=I=gEs0%8J;ir)R&k69{&K5{beI9 z9c!ZeeLpLc&>BK-o-$V_nHopK zIDd+mMGYNhSJtt4%jx&Ek^~E{pEoHB$pPQE&TjZyzC65v zj|WTSoI#UVevEkV`d_wJ(b>Idqp7n=b8z?lVqBX!gW8`k!CeeZWPMz| zi?iuSEns?*xvwQ=Pypuh+~iGz&-@Ukvn9UnTeM4}<) zy+1vnuC}%|w$R6mSLKb|UaZ7JNn7Lc2mb)k@?Vjbi*qr7o29rQCFP8wBB5eBuQck_ zx;yYAt@-g@Z}|TJ%kk~p7yK;niw37v@=QHJibs4_fWi0~*0y+%;Qs&}R+{v+m~Pzu z?Kg0G6vOw~(`_>B>3^Zvn8j{3hLTzoeSN85OFuzoRX_J`fq8$@JWgd?wRY7kHd<`T zI#!27T%zJb#Y+9*_I-4xr*o)UQ_r5wUg%XlgZPu}>B%+qUYQ~MWl6?16wfOt_nn)w zwR*MvA^Oan%3|mmWxeQs^@Q3YLF@7O@gL>qn}iI{^YfqlZ}Hsm-;K@vGrh=V2$f8l zs=_N;ozJYZBL4sYJ{XN*jbkPUq05g)7fY+jy&wv=h3&(W3^5Be7Km;`@6_P6Xp^=JP8aQ@h9Gk4^&a?g*ClKoTo z{{TK%6wjqMxAH<71@l49jXdpP__3neXhEV0X?c zo`UyL_s6ST+mt#Q!#1>oy1DDDSq}I^_1`P#l-h0V_1vy~{>4v{Lyy1fYp0uk-3TXar%hb>qf;68Gpf5MPivQdN*V3-IVkL>3oO zgQc&f+9VD3ORe0QWQjs)exNOKpxDz|)|o{M4RfB>={2o;jfO}DQ-x3>O;WMGK|-6R zp8y|~;@S>FY4Pv_T>8%e)OtPl(UfB87+Z*T1rNp2X+NsZ1qaw~i*!Jeg=-5)$UmNE zjh~0{Z4034^^U*YCy%K;EtpW*-^*5AN5A$)`+ni`9T*W+pXA)1ce5(>W&NrVi>s$t z;j;j<;N+M$NM@S{F=hd~?acUL5`-6qJcmgK495F8(@&hLSl)C003qtA+TscR@`9J7 zgVdLaNE(n5Mapr9<3Eu8Snf@r*d?c(3cjFBzEmgVX89r`WiNBu#9~iFXDTRelNS?t z;hh?#J?zOj{XUwS;jlmFe!EXM!r1r7!|GNJ00%gw8R*e?Gx4tt+3238b3S+EWT`OF z;Rm<=b}f+`j_~#D{*g4Ch}#{L%8r}M(^)nSdV$o#*0V(LW$h_oQzgjA&WSpGCQO)G z6b%Gs&HBCTnQnj7cG1V*$2u#m4(2Ol?j~3BIHwI&OIBj2zudcLT61~JLCZMv>sIgN zB>`tQ+b-gs%hiJGT38aN{7;vsS`eD`_HS)8Y$&5y&tHHQeTim?W$gKR}|Tm z2bA-oX5&>j^c6?0>2+E(^y@;GZ1D@zlcJTnkT1rvDS~;ZW7*nvel&4DO0JucZ2s=5>?*mH^o|zK6tNW3@}Qno^eaTBKYZgc zWm3uV{{WsmNN=|O)iL~F`)XoWrS)EVfc7s~nf0K=dNhxG{N^zfyC4cqqW5JoyX?4` zTJ)9Ev{uF;uS!VrB3}GHh~kyG>0|vKwygcNGvih5CsU0=-QcozhPhZ%W*H86e^OJQ z5`z5ILywprk=bO#9KMY5Pb>n7WZd4DKc~s#&YGF 
zBgpzUrim!_q-TXUUY1;x}Q>q!G^W*Hkrs}q{Z!GyNqttN* z>Xkw=PE);zARq!_*q6`aPqiPF{&)FnpW6;uQOcDa0bL2}^pBwlqnMGYFP3GLbBoL2 zd-Zwj+vAnZLcd(aFl()5&$Ih_aj53WanjJ9vX|6xnvtzbGNmqmTdE~7jQaSJ?$Vxb z!i}8F-yBm`&0^?#NCdt-B=&-+N|dll7xk1L>>RDV$Nnt>tLIZfM(X+91H{A8EJFIE zq-)M=Qg#88{+TSj#ZF4W9VNZ-iXy7oItr_v<}s^hI&s-jaW2Pa(fOcb%4%oaOzTUU zdcU>qVRL4baI3Te3t*?i(JSa{6Z}d#*R{@{>8e-A^TT(8OC%x1ezwUTzuX!WxlGC_ zli25mi$3}M-}Y3}{x1SaXQ-UK4bu4iYI+=pMQSEW=W+V!wmyMlq>{cvM`?+upEb`S z!1-|brQVGp9qDwtjjOo+arC(HO(Xg9Pt*OC?v;&;7Nkc$dqnVF&?;*2GoM@IDs`9M z`Y!4_{wnA@?6=a9KS2Cb=EE_^4$6UU*&tUN&w9^CpXjc zzdj@Uu+Mi^C-AY~AArL6@*OSt*yF9<&)GR@oMg}LjaT%)_ZlkK_<56GBe}_7;#HsM zEBkZj`uZN4gP+sqC7r%qJM@iex^N?!c>;nt{MwGSTG&3jP2#6+66u>76ZH(n&e_`$ zVN>Hx9OI1GvHg{koU2+6DWaH*F`#NE$w2EJ?QkdZ1XUMN061zCY2y!n$IR@;sF-K| zJs3*H{O_+vePF{K_|9iCW-_$$32eIFqm;INPrlTjtwZefzhGRwFYQu|tatK}TgPYN zP2~PM$%~^-zr8&ykFHaFnoTpRHf)mB@T{V1$PP!kV#UmVVn^>n&%);BpG`#WEi?DH zH4wRjrYJ#(^Ln!j2-xhcm~<@M{{Tdhx`+8Usnc)%;nw5Q#4z@yvbw6kKLKp-l!q9< zCYfmfXn_$g%P`gXdio&Nw$3L|tYQIK9KANlYx(H{@)KIk{va#!Ei*!BEKLN_Z#?OO zV{r;kuGdyd(E9R0{jWfatoZVK^|^pe)-9QWrFj>nD`%#E=c+w+ zwbW2E;=#2LlSBkTVY&YRXfFff{!0hX2QWAP0H7iB{(mQvq`G&z%tzz=AI5GL`af&V zmk{sQPyDo(Wfxgm)a5HEW)H26$(FST;$O90T+$EY=%>1>-%Cx+rq0^^zB<3O&Vv;a zfA5XUf_S-wX0Zw(F1P-k{^aoJS9iI^)8|(eoTu&fU(Ec18=5Sh;cr21C6ZkIy-a1g z{(Blqzj)mzY=y-ufY&_g4=1gx6?K>@PJMVfKMZWdI_EBGUbnhZ_s$(IH%uUG&~+>4 zaID^F^(3t!x@Jk(*Hf13X~#BHglm?FwkNKJ(B>r_3lSc6WKT}m{{R_>qFnFV;FF%v z_I-&6-PfvLqgH|#foQa(esk| zs)T?^p0%+-C#1jI{dGSJfL^rqJ0JBFmeN+b^!2X#!PWT*C$XI$I1}tf&}w?fMUAP? 
zNAlqEWTsRg2Qb-9-1tkP*4sj8su{YDLvJzoQW5pX-c0xj*5P6av$bkP0Oo@J%HKGu zq_nA}2liK+?D$g)2U2Xs0>{w7PzqWn!pp;P6aCdreA^e6ktj%YP9NgmucfRdF^Mm6Rru%Q zC~UmWTB|!LSvN|mqL{7CZHQo|IZ(OZF;DXRs3@Zw@M~DjXXQ61<*Xu}0wDPQNAgsf zzH|Qo8U(UcvkjZ%cCSx&=f|Vj=x7YQ`HAI*&q!~N@luJu=N0xIN=6xHz?%nmH#7A& zcbNR`^gk-C%Q!h23_ss37P~0rftKBoe&yj6GO?Ef6=XjI4KyhK01)KZqD#Yw)V)T; zHv-M$+hyrnCf7`0i^Z4(w=)PlS>^J3F`j0YBIYLS}uDz6L!0H z^1eC{dRx7JQ^ia{Sjld*y_%(bvlfZ$t*y_6NYuWjk0y(ytFg(7yy`3%$(2sBybbeR zxT|+CnuI*2csW>qJ}aE*V@mb^0MmU0QRhX`eLAJ|eU^r^>p#I0GoLFc`B*(e)Nl)b z@L()(dCpfYG4Oi={{ZtBHA%OX+&ov>gP<;M)aFg4I0I~-SSCiR01&nXP^u2(SW>A0Nv6|&5yqAQHqIYEMazW_=g(jn3Dmm2PYY|D9rc~xK zoue6)Gu{|fFa_AwP`7BB$>;jmD0Hgl(;x2h&Gh;#=-u0<=OobscBZLP+xAM@H|8}u z^j%OKyH`WW-8!YYS>CnoQNFV}OD9yz*6*$UwT2@k0YGLcL-i;1=q*1o@pUI;w5GTf zpaT$1+2`NWnX#$r`>ORF3dfa{c{}2=->-)SLO+dmxdM6Bvg>(uel&JI!hHFe$^n!qu#b&-Hqx}_~s{29@U1gNltrtvr7{{TFOSe+;sgw^ zXO+zHK%VFk?yq%f{@)_e&-;H+_J1J!Vkns)ba@}Rsl(9ppx~9XPta{jgBGi2^F~U~ z+!LvrVUhdW z{{U&T{{7xG{H_lgBj)t>l|53;5lu4JN3#>qRw}Q@cRsEpXDJA*^_q^4U z$@VQ~%HTN(LL+FsKnzn>nty=07Q5_esM4Gkt2VxLw_8VO_B1MV^Y7-`ht$N^sxs=$ z2cT?{XLx`YwZ1qB4|lKi(&F3r(>lq4jIMtkhtt#h8miA^FWc{(`5Io3YS3!YCyr{! 
z{Dbv0hzi-ZPqtCh^)w}bCY-2iRZjdZb*I4Lk3@%kbo4H)hF{w?lr@%h;kT1`4qeY= zYMuK|P~}kS+as9D-ub8(ZZLP_6JtZl;_?s0mD)nOKaul^U4E@6%axAeFQ0PfhPT=3 z6qiEka8R$U@$_=E&*%Gz8u^Xb{_)M)hjL&E6`6ci@*wdXjH7cVH#opd$n zdI1aH$XO|l!k)`#!T$glaS$nfeCID0{U2WpRJEP}@54L+U+3o+f*I%XEcM}uZ^(Qz zGyc*40H2S6@%-^0>KK#Rc&tsSyI8TMrLrwDZ6ECszIWzsGynMpHm3b&t^ZmGDx|f<;>%#ENm5dZVy`DF+rz{(k z?FE%$Ci?>9E-zC2f}lyYxIbwJrbx&@sk7(;}3tkWU_>k~a;=R2|#85~DpxP&c z^DOh07&aLtL-3?x0=|_9)h2y%qh>8Ixo1WMQ+(bP*c*=Ku@3pDruPryeDB7_$?4}s zwlk+^Kh-UrCv-Zka#XtzEh8Rtxqo1g!2bYU>P0c4vEQ%M<#4E*@;;yB++3l%GewoB zBWE{h7R0p0WFK3mm0*L!SHBamkE9RwP?cv9sPt(p9Yq+qD6RWd1?vP)UClr&Kuc$g zn}o$rh&TLz^l9vV+>Xa%v(m9)C$rA}eH1ULr|GGVVu==X>58Q%eF8Gizu)c(o+<+k z!T$iTettdxeh1)lK7YXf0QA57KOZ~u`T=V6*v$b_1Xb=L++pDly@~~Ztq*qjv3}34 zNuS$EDF|zR!NDe zhgU*%bZ@(vH|_DPu{2X-UhlmdX&thG^Seh?MspE>22-g_ZakZ7x?G+o4!_5?E--|( zJE2AGp63I?-ht#pXV!G~u0K?WAzPG>b&3zNs6QkwGc#`HLjhjEOCwG4+d_{4zVX6%f39(5o_t++p?2>|Cv-lM{RoCUgB+ z`lSxh=k_+sQ_U(K?uTr;MtuJOu|`V)YKb?ww-{{Uq)6vOV->n8H;k8+Ct0CeWD zRdg3ldmU#XSZxol@){#b@fPGFm4FY|G&ptC$$%gFfPAd%1LYpS)kTX(SBcE^s<}FS z39g=za{B)O3*1lb^|L0m2biEgaL-w$$#eUc@-R7>6VCR)ur8EsbGbwN%v2#r{s?}8v zez+ZtgQ3cYnbU=zkI$1*Sv;daQ#c9@e#)8AH^e;-v8E{r**81xa_~k~?sj-_R9Vx% zrLVW@dQo>pu6?lIKFUH-&rwkN+Z$4^s->^!+zXUwvAkG?ynsG`0YvV1P9-t|6-3WB zG4#Gw^+0tEn{NiMY1t2?03adDwMagnFY*p&L%jHT>zxYY9C1%aPF`%In(Tc27PS?~ z!$b(dqJR!!2BQu?20YLDe;N0#q2Tb=UyE3zDj``9AsZ3i%cu{rTINb5$Gj7^H_To`$QFaw)r2(`j<&h-?*gy`yE!_#{U~9ZE73YL~p$ z(|rM9ob=Hj8?y|(ioGcN*m(WqqPh~gI+S`-zrIuc-9{vVLz( za%jHU+4cdTmCbD4VKppMn0YtGqvbA{>M;fZ2kG0=!}itc4=`ztBL#wa{zu2>e6#r$ z`Hp=2=Z@wWoTJl=jn3#Kl3feB2hWZS$H%o@GfuhWo|Y%*rT(A&&Az@_O2F8*mA8%f zXFhI~flSxm-&FOvid@I(b`Nf~8@e`9=c61UOCSiftruyU#S@c=w06M>;$(P^u+?1)A z2KdjYaS!zXr5M)w{z*)kZGYMFG;{v|Ezg%*&S=1u()4rYq6(+2u2>z}_2jRfCuM%m zzv6t+^t*n1T&@pKgbC^NyL!LdeAq9^yjNyEwxvfvp^@*m+ci6hN&6_MPh}b1CmP1B zzeZ@?>*uH@&pEF9YkS;}13KrZBy+UgF;e_H7+b6HGVOk4dG%)F6>gE0Knj}C=?Ac4 zTKVvzaJWn4{#~h^n-8r`NG#R*qq%P(Lg8qA6{+-lrq_wSpR{rWjHb4Pize^Jd;p>x 
zKzO|AUph&hGx^}2f0Ehjsy56=t>15n{{T>D`1xZWm1{x#0%b-rIUK&8C`)h#*D zLkTlFm3*RRFoc))SGy{)Kjb>ylD9!5k|mnC-Tlv6P5jd z-(Ol);jFXc2n$2xH~fDO^Zx)jka{yk;yMXC}_^AlARKz-X}xVAu|*9 zCQi`074tJBv!{n|ZMYpC!Bclf94+V=*2(Bc*a6LHl6|c;;aF%F`wKVA_4u#kVmA0U z>&*WE^qiU8YqD)jw|l)=C?>+J7f1Gd9UGId&GXaxmIi8$#u_2@Jdm%N+*m%^zxvksCM)C93Wd6hB|21U)?#p8A9Uyw)sU=t@Z?+X@)=eRV{B@w{OMHN+`PeoU-?I zdOatqd``LG7EY_|bn#WPT$KxwnK9U_NjrruSv6QNhzgxff(soQ}86m?;) zd9`Va>?~Q$^27(}HSDo*XY1YAv!i6~KL-8crRkm3x76JuCK0Yy{{X2y4^b#eaP|JW z1<((|RPF3~!^emV}UErME? z+=K+_V+agta-3_T)DKO=Fm^;8LH&AYT+!YBJ)T~;7R1vb`xYNnw%ZjBmVK%Z)a~N_ zIIaX2Y=Vh07|)|Z!D#VyRLqH(N`5hpPoR+XtkhWIBzk{WJ}4531FG4GpVwFW7BB^#jPA*0LrHP9ibk)7ayghs;vgPPfM9CPEzu{cu{i~q2`&7K%IBj#MjVm%^%>l?8>@>d%{Ni`4Vu%f6tf` zbtuElG0%ccNoMq<#?wDZKOxjlL;QFYP5vRQ)`Ib<+5;Lt^3iv#%KUFR^7mk<6vsGe z?`^QV5Pf8`6C zFeH(gT)%z1uKW4DM)Nu;-HE)Gbi!%{cUp`Vsc(EUFQ-D-CpSM>lVEDF6*ofv0Lrd9 zxi76Am-C`4IrQq2JDS=gI4+`yDFRv#rA12$WLf$>L`tLa?$70&8vg(%JF+!&Zm64U zR|h%lT%D1Pw!O01%MWm+06(^{?V4LKxu-v2NvwH)xyhP#tv5L! zmn`X_b^VpJ*QIeJH+%#2G0gBO$7iEw5-fR2W4}NCtcw|*0)Gm^Jz@NU{8$aIqr!g# zUal=&CD*CG)&AcX-q#Z2V0zV9%NLZtYkHIITdAj<$$;Pb{95K#v$Ec0l(tqA$q1KgpPMb-q+=Z5X_smVjZTirZ8lSf@3;m#3h}{UePZWKU2Y`_RNZUV7sn@`SL9 z#jg3TKx;aELF5A9%JxtWx8t=UbsUpfT<*nGF)fpmY#SFxhdxJRQ6SOF{Ckv{gPp~( zllFgV-kW{*Bq3WFw?sJlS^^g32v?YV3_MMfOsaPZ2>GN(7r3=+~A70&Z)x2Ck6|^i!tvt;?jqt^70z z#IALW{lUlUBaHdZu77zwN%m-ptEd4w?;vFqdq>aR@fSh7Rd%lFU;{c`Z|p}n+wJlC zU{JTwO+YBG>;l`l4Owr!G9R^Hn*Kf3=YB)e{DZKbk@4NlRIKDqJ!RNGzon4^czp8vU5>e*ATLp_HddEeKQ2vjp`G3}h9kCS~zYzJa z@~_D^-$+pu##Om6N<;O_si}I_)&Bs8Hd&rimp_l+x_!8yOM8P4{d&{wDJ6AGk zj&zM*QF5gm*hzDBzbU?+)x70;xNA@PF>!ZD58o7sJr=oQxxiV{K9{g(Mzc)TY=7h% z*VwDtI?d^}pjDu8aj3E$gZG-udnns-bTuMR2c7aN^UPZbtg>fO!?SJ0w>9#tTtGEhu@#IPD@`58_}XP&>|<~yb<^t?a0bn5{9 zWs4$R1D_rn`sn^g_>CO)+XEw~IP}14=Nx_sT@^0_q3c^G@dx=W^Q0EOf}Rhhgv}01 zd9qQTlE94)yO@cwT$Uq7^>Pph!&15<@PIype1b{;0N8~cXVBS|_^(Pwr`#BP%AKiU z>f~nG`*D4JFr=pix|zv7d6L{@X03=Op}xRnx}XHmfz|wCln+$j0l#4Vf6Fv}97@Ke zWuCk`6i32H{^i~AiM|@O3B!unzu2$qlY{N*O0^82dWK5XFqV~&$Gm$c)aUI#jz!L$ 
zslxE8$*t&ywDIN8o=3JutnmiMI~E98P)-&Z*6F-1taHm)7z}Uq4FV)SrLX;goRX6M zziV6MUmnM}iKS4#pU!+`&Cl_x@so6w{{SMgGSBu}#|l1tNbz`nD$M;Ro=NFM%g<;< z0^-1KC1f53;-dr?lxn@1??=cxf4RnP{awJ}@T1_9#)S2Hwwd+&N@tVS=R#TZpr6Ox z@g_MPHEDVpmwB_~y8Sj@#}`@|8}@IljHJtt(yLIg&sez$hiETdZvyp+F2ucq2isk}TT z)%wuVnIhd^R_1BiL(XR*bgY^jV61BL+!yXAY`K6 zk2)&|k#irH#7s2$$Id)q?Y4;y8ddWr^6$SmN7?+!lAh( zYCp?tt^9ApVPW!LjKv(8FS5EGoD)>400zRyE$N6Kq z#txV4V>q1&X;1zy@>Ew1kh1_$aJ90zvu1ivS%JTxtfqV?q{SMo5XP>BYPIS4&xO(} z0#T}o%hOPpn4iko<0)aBVqb^cx@yX3Tx6cFQsfBwRsOQ+i(q~K0A{Ryd5k(~-={yH z8;|(Y66*T=^|zeZnIKc?dvO>UI{mDT(XY)FRaHXx?4!@vZ&Mnc0#*yotdMl(X<}AQ za6LHcJ$k$^Fy|XSvh{569i^q2(5^Wq--A7GlQa^bre~&1mErr41y#vCr{laK8!Q%UsSr zuZQGaB8PdR)G(Iq!*g?ej3LbP9sLW`Gn!|oegPu!Mm(Kmr7GpjkBQfaBUs~#V$o%K3icy+YPwP zIry|O%cQf_)KHzV*{*Ty>+L2{G@G+DIxPxm;#19C!_Z4P&gw_1Z#|{um{R>hZHfb} zp0&g(>GT!+!`$?_fuE*+H(Kfa%0;<+UmW>h34HzNwM`vjshM9kOzim&qcsw+etxg( z{+`CwDq*4y?ylcmle}7nRD9DCdx2GF!>m`z{;jm2Oy)fzL3MvI?>H%6FP|SS0Dv1& zAZjW7%b84{pZ984GRo>Lr^-p!_Glk}?%2+CgjFXW{MI*c3b1un#8|d?yEf1p3_s zH+x@@BLZys@X7hOJ4|xmXY&^J9JuYhrmdeitCnQ>Q_*XtyCivT}{!1A}Dh*iz?f0XeRVCMSDctj5yX|7rj`$Cpq0%)zV7$Tpf5@4c<KqxFRn{i20biEvXlx|kq2g}tR?EDZ0o*!j$n)kdkNk*K3`_ri}9{{TNrT|cd-g3Zw13V#4FG_j2`Z>#wd zD9_{L;vgToQ7K4B<`130KQj z;*lvy(Hwg*gUr!UwS)VDvYQ9WtJ;lge;n*?^Zx)R%dh#IrrhQ^&wjqIM?=~ zbnkv8Lh0ysg);(;4cfZh7x+gv&XX}G=1m3QDoG^I7T0A1@-ZbK(I$O@(j3Nb>$Tl| zRB)&1bON~ISJ$DieLPHYirfuRZKE_OJ6$V9TU6xXR(Sz_w~uR=w(Q|*4SvB@h$45h z7o-3KY@R+=&a=@Jwo8-fReK~_`1J_4QedN@e(A}Xzr)^v4Rr(6HB=S(E>QqM8T<<`rsQ3@j zz`ag%TCi_X^{X7ZS|3_`Ae>kQ^d`!v8Iz)~QoPW_YmK+OzaXctiDwQ(^rEr773q-k z4c}INf@=A9)*o6VO-paV&pyJmtd@fm_c%#*tLj+E^FVig(&A!4XXy4cJ{n&l9$)11 zGuBQ65PAOqpLKDPR9`3H3~+jfKtKDgXP?G+pN?n!pUA&S-nLr^A>OEdt}Piy`x-!0 z{{Xtc+%Zre!xmIy620+E-to$_@kr+UXWw{q*992OQ+qtdQO)|smrXnQb97~`2{v=f zqYHWpMUV{Mb2^1`r|B{-jvHRzT~$F!N^~em)jDcch6YM&TOb7+X}1QGz?pLXk(~}) zwM*Xw&W@a)d#<6PImHxPmRcu&+o}yu+irx(Vy3N%aXpmI!RK-nim#53vDuMnPaHH7 zwt@tPndfDU2H)@dq30z4+33^hE_oTsVK0&S6Y*#93Dl`0L)B9WGCb(ZYh(+k!w2Eh 
zYtqLo34Udtu8(h~jy(JitiFP!majJ^2}GXP{^OIObz#eoclT$n-RkiA%k~NIooP?# zJ}$ixr5p6-bqv+LlL4{Lz^5Wkf8PH9A(>nKd7h!`RZOhdkc1=;{?)C=NVtTD+2!)& zPGgyXOB^N_!rUqGs%Y{qf;qJ0?NZnN0Gz#AGvu6{{Y-DGHshL%p3-` zIu>3Pr`RJhi`WiNht)#ACrNO+JjO{!g4FY#zDN>h#dCZa>(qJu)i2>drSG*zs<5By zkD>s567#MzS&45rd6lRb(!^{kv3?e$SL@%OU_JV&3S^-H&-}wb8J125W1~o>X{#o_ zYUC`!8I~!-sbn;;+&&5(dq{zQVN}e`pRN0}WeIAKC_nFcT=MZmy8?vmV_UvjGmN^z z&@(mQqex{h+qxYKx|8&%<#Qp}XlF%y^r_xn(zH~iQ|3yX`V=?ve!vH4EoMp zk|qV-#V7|Rj*A-nDHO0R_!Q5Y^`KaP>(??#?Q+c60F}zMXy6c@F>Uphrg8LVkBJ=a z4-*Co^_uU6u!?aEudC3ib@kJ4zca$Qa;3?5q5lADH|ej@f}BQC>^d9dq{8C|DW9O| z_3Mj>GcYm;`51;Lw<7+(l|G}@>TOI}dyal$nhkf*lb-`er_UDgSImbS`6GI3WuSDy zDYKN)SJ+)PvYN-8Ev@IOXx|t1&FpS9tNCZxhOfo8$IT=~Cskidq7rl2&oG;vL*G)e zR#iy-rTdavuSBRXYiM<4s^~xC0JU4^gZ^|+;DurGBq8P-{e|)VFuXtb{{R(qKN%(R z`?GV%GTEn}8CO4icIY4NOLKl@#MTf@sg@8 zl*XV;yu{Vn=1chnu9_!9%C&U$`Q{a|y%+VkdP=Hje{GnJh8M8wcTPTTY|&S=x3~E3 zPw}W6)KJHw*CU3~jy=thu-XBU1rP24lFjqtF z^>u{D^?TP#FIL9ZdA&B|dl_L1>e_e1e%lkC?T?SmZS3Z`$I$H!y(6tuwOQ+3W~Gvr z6@z|%k^3+Wg3M-b#u%-MouIbOKqibML)(p5i~KWB`1?(bU!I?m<|5KUME?L&@`aBH za}TPwhsds_G3Hqg{CpO&H~FJ;Jw-{+j%?y|drZFKa2`r8mXR>C%j?QwYx8dUe!9_; zFZl?s(2dlG^99GsaM52YwE*_I(5^UG9{sh;8WUoLE%~C}b#~A`;?+>HqjbviV6UOg zs$PgVRXH;pKP=y3&F`QONS4HZVES6Mn%KYs1^M|GP}t4q&*sDcPMzXq0^#eLk=%YCSK0N@t5lCS!0Z zb9laA1=*vH$fndxf>aPSa++nO9MSbvM8l{3u~TAhWX&DG*^OE!o;p|*bidf?T3I}j zOB;e| zPuh=J`=Mae<_S2QN1BQe^7#l8pY6{(NzO``y`PrIeNlU<vIok46@ZET4)Q zX!a~hn7Xz%Vz#3I$rcT+`Gp2f1-n&|U&aVS?Q5VQl8wSD0C7N$zY<$`-ks!?O6Iie zUyhMWtPK1m#X0BN#I=%sV~7!y#7;wAyF$-+m=ezl8ov&jGQGB9zEW~ZR6kW)a?cgO z6f7cpC?R$m!GowQtIzm{NVwv{$Da%owqvd^Qj_8-k|PE@{IHcU1HgLAR@V)5bw9F@ z6Hb#33wUQ|Fw7z%NPH#$*y%&K&j#PNJ-RRd0FTMHFZd(V!EC|2%1dU2(wH+Ss$-C{ zicwGb$5UrP!0FNBK*Pi3l0bzFIKGe=ybG$4fyWg3zkM^T{#-6Z^hb@z5K#a_7`7lS z$OtwcIXT$8t(SpA?7ze8cdtg2R*=p;$}Q#8S)8gL3yc)|dX_1)pN^z4foEUCne`F4l8 zLF4W?{Jkk(E1hr&=Vq4Z7C6$h2mb&OzBY4>DvZ}}6eB{#6c9^*TMTq)YuUCysmMc} zM?xwc$h9zlcaUV40@km9%2C9az_#jY9b6V3UDfS4l|c&ew^s%To97^ekgU4sP*X(e 
zun`SqJ2Zou5ieBznx&TSuXMV6b80BzRY4HBJ<4_#DjGHY(Tu_;ao827S&(32l?kM$ zAWftr3J?8ipg{is%zP3&BcI?RzY*(*0FZVD{{WansEVDwzJVC25kHoJl1dM^i5N}m zn5PEnVyJ6ARC@L}lR<-8MA)rCmL0fkFNBW0r_Rv9Qkh+>a9W$&?lEVDKT(>UP$i~ z1Zl=PjcgnQ0xiFI^HL6L{{RAg_H}s^H6@6BCFsWh@JqBwfGb-gnDWaR2;^y8mSL=`MMz`;2t%8KepDKew&AEAf#;k~J zU`EGu)XDh%i-g>D*(?+jFKB9%h!4%m2aV`UkzBeXRs~!dN>rJ=-V7jpw_)=Ns+FbX zQ6?k6Z20bxEKe!0Pg|bkM(r(j5QagpEU@UM!ZYuwg4{2P&|C-u{{Rp0Lj&`W0ZilR z7^guV44fHebv^ivO{VTZVB-{?{{TaLCGjRBVPswZ0N~EK6*IjA za8uJckzk7)bt`-A8jgXt?XrUqc}=06s`if0cZjqEJ_&m^Br*iD6|8aS4VbE@6$OG{ zGJLdOedu!^E?XrUt%6{J88JlLUFqf$)JapL#=t+5XfGJX6VPEy9?9JXvw-MEu(;KI z;WFv&`-5!Gt9sD5M7E(Z7JHRm@Z77fT|8 zVwg)z&`{TV8HUe8FB{9;Gs0PU1Ah>z#wn69wnvh1!on=h6gY{=X;wr1ayIDD7FRn& zjg$BnaU&0&^olw6-OtohL-4v*L#&At?=yneB*=XASY5podJP{5aEmZJ+DueY&I856 z@U`e3jxGxz^ca1J)O7TAm$2ye^O%``%PN~9n(axQsxH#^3cx@(o7Xb_P#wIN&~{8) znN!kK#)BT!tl1jzCh!Nnh~8S@zsn=XpI`=N;#J-)`cR)Ot@fF-RQ5-N2_;RCmqKJv zHkEt93EL?$4gt*qsCiuLoD+q53-a(EkJqdZQQ|E|aM7HVOnnB~IN-9TnC;$;X;YjL0C5L%MjTgYF zG-#K3` z4aB{m6a>w%n~IajJ)`@LRVN|f6aIIPE8=Tm!@7d$Bo>Wb(}*lNEE*1s96gYUr2_(G z`#a>MXb^S^lYSV$iNEi_RLD@dh6RopF7vl! 
zI97qjWvrl_E6loU)R{=#3Bo7z=3>h|cKfOPZCE``6B{7d6oO4%PVH<>{{T37pSZAy zF_f}amwVI&dFpIRyGYv=G*>9Oo0Q07BGyf8n@2!DP{oGFr{wy3c534`yV}d;aa@Yyx)#~1SPbx!eyBq2(eHgpPukT09r{(GY05)S$wFJ~Q`3GT2 z0`MLqmIEz{@0A;~>j;%Rk%WPsYhoaU<`^1{sGG1`8s@w6EjEbtfx;2J*G|TZ zSVqyu?mtk1;MwQ2O~D3$ns<;jG^+yy__7bVtjrwG%Aq9ho&aHGjdVOj$eN#RcoXLa z0EQ0q#8WKn0IP-u<75a^C%*c2sR``1>jMqvlZ#kj*;IC74R*>uCID{URF?GQKFWAjp%9d3hYN|U7 zxs@`$6vAqiFFZF@0kVn0n;VRedV!q(0NbVF$Xb8A?k)XOgcXVYmg~8OG6pJA6(OMt zhZ16fd;lBv%4xs3Nrm=?9YoCZicNdcn;C}B{{U93%CtR**c8YNB!e0%2h)y3#+|iv zOMU)n%HghD#2-C=ZaHAbI}#tx$2hUzPzD-7Cb(r&M~w!!O9Al>Ip3_h74a6da6eHf zf^n{AA_`BX*Q;~w6P}Kx#2#$Xc|LlzKF~Pq6?VI0+D(&Wocl`t4k-*{YU4^3_H7Dw zPRx;i5D)ng#X>P01`eXRYkZ(H1}AnNiJaoZMPSiKG5A*KYS0|BDZ4f?$sR#E2y9l} zy!Bvg@FR)`n`fg|2w?l}jmYDbSFZ8C)d03M4Rr^K8I+@tlsfwjj?NDh%m&ILnV4}_ zz``2fqJ(WWplTqOYRKaz{Nx=06dv2+#d_$S_`&xrw)uVG4e6dzvOcD$NI zy@lcyTK@p_8BGX5EOf;!0VBRHijyXXoiy#r2FG$^l(RldYayalVgJrekcUAVJpaLM`Vw)fh`#r?T8hqs0_d-?g3x4 z{hhEUO2v)Y0#(JBR~d4|oQ>IHrj#u?fuyl$x~&O%&GVLRsKq@h+ll0xYk)W-+5rAt*-w70|NIdHK*-mtl~>TLv4|3olcy>fGDV& zqZ~X&cpQ{Xz?;?moH<ZnTYDl9hySxa(0h1!ew5eC0jv-FZET(;NcP; zJ1y$JBc*I935%g%3W^8IdL-hJ5`{MCmT$7_$c+}6#+z%>WFT_w$0OCcFy`3F@H>(h z+{d&SU4L(%KG}MNa)_d!smXvmP8iI*Ot%VwrntEmn_b>; zwO<{HZbRUmNWK(`M^a4i&~VLoiAXG^KEmmpHhPqgO0?X}41(8)4&|Swk^f3)hh>hd*?+jHjynk z`5rlOgzs2`LnOULX^t$>bcoQIkg^G*nXBZ*0+yL9Zs;F4?at#&SPr6&Wo#_EZ8Fu?TNJqM3`K@L@W z1<+*qR^zK6#q!!?Ne6^<{?>K@&@fA{K{#is*>DZTI8%he{QTXYL*n&yOE{{Viks+DN*4r>UGBa;HM zPf$giY!dLJHr+d7#_RxefV@D0*SHQi$Lv%Hvnu|${g;n-`qR;jh$C^@_)#I0m@2*1*V1xK_(Km_~wBB5Z-fA%#+od%=ZI8tD`5Ec`vT zAE}llk4KpFxWb($4llcG&BsNSPnl132#Gc?R+C91@JVtVN2PNG|ASksz6mq2h0K_VASj6eTxob(|Sm@e(+YjErz;Wk`Mbasm z%p{a0AN8@L1MNrvuULJBI%kE~NElcQ2|w6YgO4G*1mX^Rs1-^*0iEZ_B@~A^H+AOm z%}a+UFNaNADet=-drPR0T8H{maZ#`?b4o8Pl|VpBs%secXX3lIWs)YjGFI8o;8QZ!5QcCLM< zEM;h@=)%1z0r!6kN6c0N{66gNs@!ox{%QZVPacl}j6g zKpumeGCjp);t)KlvPws91ejTPi7N=kziO6_jZK?gojpmOV-c?cNUmoCglCW#(`)ns zwN_M@HLQ1Hdh1&}_NFCjRNFq?jGGi)(V8bu$j?G_WdiB%Zr>Bh^4IinT^bTFg88Z;F 
zQBesB36%`$Egl2VFOj(SN|m^R3ph-dk`ETRIDnSk+NgJ`1)1yTCD87ZEb8%~dguH% zaG{f$Ov%{9Pls8z6?;u1?JG;?wi}0b^cjW}1U(j{r!EYA*$S+LFN0rFlU{I77`*=gn1{UHl33;@LLvwm zw2&0)2QV>{pB7qYqdIB4U0T*i%}#zL4ym`WS%$ZE8D9BFcI!*Ttyn8&7g>q|dY;8V zGSj~-5dEH8DQy1$89CIxL@sghl~G!0BpSpLel0z|r(w=^TR`D(HxQ*Sh1#*hEYPT9 ziE6Cb`yWl;d2$AfeXh{fIFvhhe&m)$gD0--Ub&KX{!a%&5(pKO%$G(!L6Br*$E$dY z4@6on)Wc}|g4_Zi%`EUYWe<}j%{#N6#=mV*HMe8|TF5j>zlCNU5U<_JUc<@~dGE0h zLehkHSAu+K91Vr>)WtG@L!6OK#pYN^3CvJ3ZwY;=)`%ww=yEhWXaV?0e$EdQGgN-b zswFX;iEl+anopqwt&79zP(0?$NzZF1;?P2mX4;QHVE+JhXlI*5i!6~={zyHctOKLc zswN@hk;&>;rI}3S)$c4;Ens5&KilH<_$8>YBb`ceGEY(~Dc_;)>r&I8UhDc=C7iSACyT5WsZdk_3$v2)Lf0zFNn=PWKl{k&y1Is`@G+8R~ zsGbeCrlu*Y^l`~Y#xs8=Nw12Gy<6S3GoQ~F2&UhwOu+e#mc+E+Rmik`5)qry#(u1& zh|$vx{X#GGMJCBX7!WgH22Z2IDEEYH&}OMk+7i8%X??K`N8q*%5%;2Dky)q(m{inv z1#H1p8b`OSPmQh z`~g|U6JeOPssf1?{{U2>^8|x$HP*Wag427f-t6@P;4jN5UxJwpnSgKL^P}TKPw6e zACtidxhyZ*TAQ>6xg5w57!qphG~2$6euW{CSU;PrtUAMsAnF-(Hgq51%T+_O3msxZ zYcXyd2&T-zX48j6U^y1xX>|FuErJ&TvZ=_<87HUAFwwJe$CJDY#H4Z3LF<{B@3#u*v^iN~(6pzds+IX4 z19(8hyhiSJ(4WqSw1ABv}dEe*I_O`{Y@V~Sej=<@5X%7F;c(iaX8$BDp# zM1qv%^&1gSpArD5if|z+!EWH2Hvqz=6TD9Z9vPoz@Xv)k=QX_kdJZ`M08Z_9oLJH_ zepyf+JkxZ7BrBDu5XHG5o#T-oDYsylBFn7KgKwa0IxfymS_sGjIUj5Y*G|DDKh2~y zyx=?JAUKv3vHqxUjJ5=klS*HVi*r>Zc}savFKF@y(V9ZHg_g4^C*CP>^@N$SS&|Ad zd1gcgER7@OQg-BBuGo>)=jVVCxgs|u895EGB}i=*1e|cWeIgKQrg>ourxt9_2YOGQ zVFEb&rUn`E*zoEl@-tZoU#A)YJ}KBH=C)&8^Pm;+bxqy`x!BI%by-%NY|{yDJl0j1o{9K~s=g1kZ1gjvBPv zNRC(!u0l~Ic1tgucK1ReB%6`K2T$`DF12xmVd24JF5?{=&IKV{w5P1*sUInE11y%% zaD(z61&RGt&fbLRt+yZ7ycLQaKXWGeJ6fIi$5*%sYXHgCL zHC6rdHDPk}*~sD)xqrxGj$BOSdL(#@ej0Soz=&MDBa{wv7_x&e{XeK*LgQVFb8Ce> zb3Ak+u3hyU@|d~tiH0i%ses_Oyn}6_q)eaa#k)oco`o_iIHw~%b*?=%xS1J7QZy_c zEOuVa0IN3@$(*i|^=K;@P}<~5z-{H`+IRDeDF@C@+8~;&rt7M=RlsN|(D+&FfA>p; zHTh@0cKR;2iu8nbyN^jqfpPLSHq8epb$J+(qepu;Tj3cuf@3s6k)sV?8=eF^zy&C0 zRm=<$-)s`w3KKC^=AJ|x$E>kvUF^6W!`Hhe$=B7-1WF3%XoZRz%I0IlFeU#0m05dk z1Y;dG3dsVNj-7gEq#%JGl-Ue2HEFtmNh<~g$eY!?1cs=kdem5}(@7%+oY0IwjD>+k 
zMRjuoB2J@oddgsgAeq3c4yT;!McB%lwo1=gCu@9wtW6F8SX%|gd>uX}WHz+U5||-> zTRWeD;664#$qIP8W00Oj(nV@DUm z>9P0oHi{Dp-h-aAJn>;*`Ud1!j~Z>iQ;Y1STfAlMxNyiP0*)wv#KiihIs#2|=H`@Z zeN-dOW9ZxWF7wPHAzUs6+oZ)+{+abm&{92bLFH&qrNhqLUNgnmxHjd(Xj*M=T>}QJ zm~&CZg5q;**gud=;1ef8&ve2068xYX;>U9t58W;4A3JtoHj{uBm<5o38ru;g0`G|QfsiUJ2|3D@>zxq8nj~;yn}Ps z1_DaQjA7VKwQA_7JBv6uF=R!1OOYz*ZX0C5c_c6e-$va;cABNn7}ei@R3mKts&7kS z_hnLvTD4{8x^ej-T0(Apd^M6?<&8bS(6p)$YbJov+h0;qXgAD z)N-1no|yL($dXq@z&Fa5c@bu`^+XI~AubKMcr0_b#S|aV=8yDwM1x?jg0VU)LOt$f!CFBEHa}9GQ59}s^V;+28)v2ZnDR?{{a3-&0xf4Q%P(g z;t%Rm*GYii8RL61*uJsvcF|cQD<`s6r>#Se$vh*}n{S3CRWt>_^3@6{v$`3eD()-n>3oW{+7Y~z&hgy)x4juP(2-ymu_#mih$qJ6;%Me$p ztTpxC2{wM`MHG*L=1Sm)%F(umkk4kkyt~hg-3Q&U{oH(LVo+LT6#XditpFq)@V+vz zNC+^{mz&a8UD(`j&mnxbZWNRYIE*h1Vjnr+%ZN_&W1L2atg2wtnd0i5IQe&B(@$j)X4o70F{z9^0&m(e)9UU}#!Dy3RL@QXC;KjvbKVYvt2L$r zynW3706d?j(fgf72*8p1yf!8wz$NvB^`JV0m z5}FQ)Bn32f#PJXKbPX3v@;g+Y!U5TKl;%RXdk{-OqH#^r+J=CdSVktMu1abut%Liu z5|2j@goW%Lv=8fgP#NWJ&SFHf;WQL^KnH1LK+a`>9u}(Pq;jtmKG(9dw!XF;gS_Qo zT*cl24`j3UPdca=gxCb<-33m&TZ3tG4*CO7q0#i=D9a7?>-!b7E3B0dYMER_=gChR zXx2~0NsUKqLv-s4{5jLOjbDinWQN}P&%EkmoRK&>dN8eo()1Rq^E`R3=Mek%Vj*W< z2Mk-923Udy7=h?=B(3#PGz>#cXqv?%2x}V*4*P5{o1fRT@D~tw5pol!I@YNZl0*}O zS6yMCj`U}PpCvzTV%y33WYpG0mNV!&!a+!?1e9qxfXv#;iofa zdoj5&(Kr33@r!dcjYDIS1UKCgl^TZodO1cL@8|Ud9l%WJF6`Vebmz+kwvS-co4P?O zE#h-|^sg#CZL!G%7g*NOuSx^V3Yub-P|e)L;(7z8u=m zK=xg2XWhQ-zDL4&Z*G{neZ-65e8{=)loBt~{z2w5rm!|l2!r~yGv+$Xo`(b=*Em0( z`#M2zPeoFIww)lj+`WdN4t6mwCL$RbS1C3VGxu_P$g!w>h!_FO&qD{WwPa=RaC&Ii zot=YtxqFtlytewB7TJ@#NbijHR(W5YrcD#DS)Dx}I;{sOq|>HD)CDO<50^YOak}z_ zJh>)Pn)4UI%*h6t?uIQ<5TBC_(5CR}#WFs$gfDK%^Aki@@@OeNh!FOPiElI%#itWC zm5?_L@hA5JX1tf<(MhwF+9Ew>Yg=prnMU;vD{v>-Mg9mHb+MWl_Mq%gWv0`UYT$pm zG@r^DJ4^*{+eRdaH6rs60#McAGYLsqZEi+pmh@#3Xfh4ClR@`d*vt4cyrRnUce(vy z8R{;Os^l0UimjV8d3em|remr?h8zK6d9WRy+y0nrGKPK9Y6}=j7@fh+F1l+=wh$nr zBpsnnTv$?+!ZeSTyiMWNe*&Qk#*Wh5?WdrrUvm&eLZfBBvf%dcWaL-^cydbFmK%0% zI;rYA*|D13zcuiygmjv+&`kGhat5sIZhW^SHD|t~{x=)chaAhR_}Ff5?(fy^v?tGv 
z8bf?yP*8@cG#uFBt-5^QC45I*Z36t|fk_Gtpy3EnSP%#cXanpHy!sGWk)AdwB69e~ zNddG=_6nKaXGHA!{{XP!PMmbY(o|I*j`q>26BRX`mXd&AP3Tt^jk9`HH0Tj}_M4aYEX#_0x07dMXT89^kkXUlxBcN6>gwGYM9< zytb8bC>zIerd}EkZ8R>o-*gHAmZCV9{MWyJ?T2F6RwHD&OXcK92wli~7jSNci(4K1 zKw**xq8E2*6xzgnImIwcjxk0aIa5_;BY`hKW zfftZ77W1hXiVivmmC`vlI22Lzqaa~{a>Ku2k9%>jVjIY-m7^FFeV&)R8YN3$7y`<#mHp#dVoj z;mImY#&RO6dwiC`;{nPOKOtLU1tkpW*Q zG!=>hmOsz*;N5VNDnk1T5n3F9+RyF=FBG+_lds_ z%gh^uW4U%XMDR}=atM#c&F6Jd&X``f8XXvZAlm^Dem(X2^%FOGI9NLT)nW7^u+oEr zUCR&WO$%eo7BNUnYJeXKfolme(8;=_dVi^%KKiOLk++mtujl^%0K2g-tU$F$DO@=z z8Z>$$RmO!XQ|;p=MhPYDUe&aEjiNhXyV{J(VRn=hsEN~iQ!e=i)q^f_1N~J%T1PHW zO43uxPxQQt_zOxO>x1H)61r=6v~r^gx8qjIz%2CM7>qnQbex|HN0-V* zA7cVDGB6eHu=kxwRuqmy9pF@W5wN24LB6M4w{{0-jPW$ zfJY$iwFDqZuL?qVX(l(ko?y(IkkgAQ6W_{=@v`Ij%*FL)1NSt+A_Ws02dM1ZBiRf>vAUs4gx=t!bi{{VnhBCrwx3A~s! zv{5b3+aQsQ3M9kZAJrOW>**}I9`hv1jg7PDsR{&e2~d{G9%tEwYJ0O@e&#@6EY&Ai z_0L3wQ{fXqH9$VTcpWaiju#yHm4nI;*y$9X=IZ20gQOW~QPFG>8<6?+7~g^ised9` z=L!ij8c=ykZ?bGH$f{<9bHO84AuWQPoBsd{6`bctjMT$^qcLFCwdY;;CIU;*Vor#k z9QJFlh2jCSPd;FOo9>~kMtv682@6x(aU&T>+C>h{78W!;Y1+X;;PmZc8lPnD%5rHM zF+k60jH+}kAW%mYY@de?qiKU4SP-*+up)gPaJ0?!^oR4tB4`cD?1E%T?0&3O8u$#W zL}U|Hkr(WDb&6#0EHR1iW;{l2o5|yI9+;X2qeXcLaPxTROI;vEb?MMVqwtr$+`L)R zT3Q^hziM;$bI}H}D}!-l$E^PV;2tD!ZbCFh0iiF*aRPY9Gw_B)$9^TrkK-)LWMwqS z1xNHy5S zO#!NKTQ`6hJ-()!rkF>?comA;88(pTC&FWU$ z4IOr9;&oA|>3-N4VCG~+R%DeQ?#_fsg$K$nRm$Go)lZ|#yS8<2=*3G?Ee5kL9Yvn7?xO-VC0S|Ql(8j{(GK* z#3oHf0?RH=QJCLFjdN*=!;%8`FnU^mL}L#Sg?xj3trjyoIQ9sba0KR>@-mt>zZMVD zL|fFhYr;|AVNLz%3wMS!VWoia1x8sT2*s`jk|T2frJi`%j#o8nS_pu{2!{_WqXe91FY7O3}pry(`f zgr_p5b4^fA`8fh?=8h}*<&{%(rMuaJ%CbkM8>y0+m z_u)Pfe}QKJ^(2FCO=X>?0s^u;XGzkx3k=bBGaMNf1PB^$#Qy*?KZ*)E6_F3sZxs4I zR(9#vl@%GLdKSU+usS~LsQyOE7U=Vu#w#WjTe8wVNrGc&TW#4f#1Z6IQR6{em&q@8wMy*D=a?(kMFW7>FF0(M>kXrKt zVs4kRniQE}5=Q)bcpz(K=+ZMx@2SeQt=4erGX}DZ9Y|`>XUG};0d$!I+U=DR>SM;c z10Lv%qGMyYdMXL{fmGXra?&hQK+!xz*crql)ThYF<-ViIko{Ol#HV<`tq&RXlqEIzP{`$;~Ad`F?Bm?RtDyX_dB z#6iK~oe4~7;$-Ca)SRuAvZ>eJKNJ(Hu_^h=m&H@1mYNAyJ?IAd1J?mm+p2|)a~s(t 
z=j;V;ZM-J$rt9||!n2~dwg~wMTYx0%G9Up*8);CvXYEE`(S|G;Oc%%?ae~hiVOh5t zG});Ee;iy`ePK=3%#wQ-*-cNTAA8 zxI*PiU60$jrXOW8ll20w^{{T7#^lnu_x7MimiV%Ah^Ak=1%1@m%U;;I+TQI_tFor?|Z02k%s5=vA zF(gBmJY?CzRJJ%BpIl^t=HbC345pifgktS>V^~1~oWdgtiQqNuC2BPMk)M>TXafcu z%W6XuRMHrYO21*t6b%;%bv3q-Hbk4)S7N1(<8*2U zE&PZ`b>>X#>0ao?KLj-Uhl4P9pohl=KA&}|9KAj^C9i=8ov@KdLgQKpelSe}b5P)N z-;xv|$MHX$iN>->)eup_CENxPA`_CsOaLzxf^gy!`e&~NSy zgfS0=LkIT1QMjXzK*rt^nwgdF`HF5u7#=JtTns3JzI7Xzx#t#!014F>VVNBN01q2~ zjV*A#c3h=q6t{ohLr&yUs!I|>h=kAw1jc&|Rt0bAWWtEZho133fG&u(z}slySh@-+ zts@ZHUZ%Hf^U@v`0GyU% z%rU3OMz2T|sne$#C#8W5?$9lRV3ub*)te5)z!BKFmN?b$tS)3%v7pPe6-oCvo{w%^ z-5D9%en7pZzpx{un(g`lX$i{lxFcS$s&*5Nu6ND{Gdwtsy8v`3sdZPuRoTxe<-TjS z7B7E(^3lX3aD2;%wL~i0aAwi?+FH<0&#^F7uu7uiH&3d*qZhf3>+fR5>BPDK`}e=3I0() zh@=_&dar0&_hkf-#TOZ}q@N*4qt>PE%?y8(`PxgYR_cKkFtZX5-T)DeiYM8Vk`%?q zOagz%HjSPWgUrWsHTe~Mb3g~Gd|*rsfPKOJ=@#mfJ2fVr7n*?=Im7U|_?nFB)Ol?c z+9lOs(4@^8_D%=Ps$LtZeUuXMz1@pF07N3z$v^$(;-krn=?Vy@xU4fT*Vq^gY&5nz zZ~jOX{TEKAWSoiNBnDPyyr@u;gMH73Nw}iO%$or%tyNi>!lgP2`&; zH5N4Q!W({+Hd6GcFoe4s9K>=Q_V}B<%}QJ`%9r&9K~GeEyzo&71?4=av7#)25gXDfIE54Q?B z2AEIJo}88V&(#5D@oAT?tswF_>VIB~LI6=Kt;aE5lo*TCvl(ybk41k`EBcXm(CVBv zl;eZT^c9K94eg;DjnE@Xe1IHLyc|R+@Jv9gF3^_iD9nRKYdK)p0Yr`mv1G$j0>u5x zkJhj@v`v6%AwL&tlZ6vMs(m2Xq;{8S%6$poQg9wi)0c-iD*=k+=%eHWUSDPX`h0#> zOM|Im#|pT(B;p99rD(H&WT(TTsstbyUbv&h1&mZLayQJk%oYSBhGT=2EV+YF-!sIp zf#6)gohQtg;)+b&tOJ4Lx8PUEyMI4t9jxfgkxe%&6tyGxs$3pSO$v!w+n(dllQaUg zu&pbp6#!1or^<^{Dk2!Pl|qzt*F|yJ91Yw507E8y3EWumy`mAgP>>@Zolz_a{@TyT zEV2&N>3-YQ)5g)8X(=> z*Z9vI4XckGHvO-&{El`mDCtoaY&GK)ve`OrJmJ|oMkJ&eZ0=w9ai99C>o&mBp!;W& z53X#=xg2NfBXCpJL|PNd95yat4C_^Z=uL#&`ZgJXP&O}jamYD^81-uKXzPmvUdN$R z1Y=b&Wf_aX8nIN$SiwL}_hmnsUVSw(Ge2w%OTrIp3`-_ZVA7;Nf=Kok$Vkjs@5>|b zsq`e{t22vu_=H+CDXakeuf31iOFgKQgfvW12Ku^SvB{3(85(Hp)^Cv@v^b(VUBu+T z^y~{Xg;AFZbdZ+=n4rdgsDW9{x})t9JGhC!%tqXQQ7h*#=n{Ha6uyXy(t1YQJX`G< zTK@pJ7kI2>1GXJJ*G#$5Sc6pw57qX}RO}?U3BanR4%?`MWoekFCj>k7`b#4Cu1P03mjJP)4s>@#58<*>(3vteS 
zzym;;uAXK%iPE|)padUWL$uJ#O%$~FK-j}!cUNVRi=h)BD(&DHCY_PAwo}BTU#*Q3 z`rp+aPS^pnvf0J^{Q+eDw!o zM)nunQi$nq_O|LXzE)wEZ{g^C&PYs_8JM7L){!9s266K*gm51GwDXqcd_z~d!O0NB z9}MZlq6G!B=c;r~(#3F7J^OTt8Hs0BfSpLRjN^EdE%&cXb7+${g#jCjQ?E`$T_w5L zmpL%Sgm6~}vn3G7PnCQy3(5qOsU#deVkOW<+cou!+T8q zbn$;%tYW9kB(O3(0UQ(X$;1+^SbR%qDo2h%DE@k$OANo@yPj`#3Q#1a3u-91Wkp5F zvcEiQzcH1bAnkcH5j*$RW}fOPlk1{V-3dtiJ-h*bz#eBiV+N-@aY^Q-$ zY?xVqoW;{1P%>hA+S)B z&n*COK##vG24acdHo7H`Ypt=t*yHL{kBty(`qu6O+N*8CwYTwMJrv-44pqUYM`dZ4 zZENkp@n{Yz4vr(ERWZYCt7*#e;}2$=dFHnP0TIE)r^s=Q4GRRPB!mcIalOM*HtRV0 z8!}b4IuzJh@HqZPxX^RbTu<~N>i%hU;=+A*(C{hmD+`?i6_%+J=>Gsy4pt7g&cT0H z_S1i&;K|z)Z(weegnaU+2a~Gcx7~LXJO=q#8vn_gDRj#10R7{=)Y|}3@>~s?(<}ic9e)KpPXh(Wm7}t zTc?+?PbYktv1u-yvfc`_rYQ3;i;c(!IU2}JF6%Pbr^aRPdpfn@mK8xmnS#{-z`RQQ ze_=v9B8D)`1Np>a6c#-VHm8~gjQ;?}2Ym>ZvswjQwX~d%)2xT0&~d?RxaK+tB45)b zGyxf8kmzB!32g4Dpvd1y)0o_OtImiv6Qghp0SKtt_hK#~xCINsc1kkhO=f z0(oPk3&Ivfh^81M5X9N#w%7*l(Y28ifte9bK1m**P8kih++#Z!5DKF9aSpbPjASBk zum~gE(z9vGZMi?BTtv#S=A~EoX*_#NwzB}KGf@i9XA7Izq`Z3qhN17cNpKY3`T2c; z+bVZm^Xg_WvFP#KTR@KaSlPDfJs&4&|b2dDS zXb|r97wPv(}d_LboD`8Kt3r6XKiQ%ohs$ATV%3 zf*BEt7E3doxCr1FrSN5rLf`3$gU&D~jkjQNhK;VA#<$61iP%cze^~2T8NqEzU%1gv z)G2~lCXRO60}39|cs73T9ekHDlE7Qtuq6Z>*&vU!sBSDaJ328LE05=4aX%N(^8Lac zpqPx`4<@147sPgZ0a}n|?HF$p@zz|?f)R+VK_GTl0xX+A!wU$x)kqQ)=s?nV^(nAG zN5felhVt?qj?l$=$w`WGxJk)yC?aU`41@`UL!gk@i6U^SFY+0eesytsSZ(7qd?6$P zbVCt#im3ViCg+_?fZ_?Phq@uiI0;KkCh6Q?YvL=%jdz6hvhE{BPGAwY=zfU_Gz(;a z#c$3EgDA^mcmy^KIl%bWJ8R(aLs8p9M2vRP;{Bn8OMACV7ismUXJ5s95ji5PiXmYP zm3^SW>X} zIOi(E5^O*fO^8ct#U>@O9+dJD%&H|K#EBD|Hm<-XJnO!nD${a46=<&9+;gqmtsd;G zjFIL|1VIYFkf%H^0HO0PRrm}qv#iQ3b~@(t6voCS5T*;_4Zi?@`n7-~yU0~k#ZP6KF`~*kxo_iQ zQ1TW*h+0BdCK@STj&lD1uW<)kIX-Y< z9iC7r6sbU>jmi$0%TM-~E=8)&SK*r15sJJRo&j$<`;HUd%ILZt7r}Jd6Q278D|d;j zf2m?CSbHiS9K&NcT?Jt)`ZJzj)pFl_$- zuRt&}a|+@ewW+`q<>40M`{}0|?2{!9boQjc z88%5uUB94W3MD$=dKWVWnh)J0yNb(haUAB*W<65%aksrYyu(*cvI-nb^=PmDD}*^t zatdN4WV%psk{e>evYC^2K6M5bL~98^Xafoxp0czwhu5qN{fn5v5l#;RT{zUiLZibE 
z{&5rz7dV=B6jad0Kjua;z+~bgkC@Y+nxa_f4Ht)yY}Z6cq60lvtnu$F{4GX4sTeAx zx|>1RA6HmqRrv=!oDsQ)JI#)aQIV9k$4Yv9kRnUXp`>IGA)H$pRW%^?B!ngckjn$EcgZQO@ILzpC3l0co%uF5c zLp~#_1WAmP!4+ZJf+5O}ISlESkrXSbWG`<*Q28L`Pyv)&Xu}pn1dNQs+E5xfNFwXmz1V0Y zdJi%Jo~)cE!^WTj+c`D=4E(byQH{WFOmQ`sGM&hXjs2TwdlqRD=!Xeq?_bvjk`^3 zr??t6(`tjjjbNi))`;-HQSx}&cY`6yiD1847$I}eXN)j;meetYi2+v0wd@oV!GUPQ z-wtny?N!`6!>39mHbz;Epjgz50#%ZdWEHC2 z=or-#jH_*_n%dU^MUb+0_5f|}+4MN<3p( zzHgL71zl4caQ+hm^_j1GJq>OyHcH#aS5AEDXl4YmwXpQJI*|^@^ypCu(a{>iA!w}6n{H9l3;(wCy zYpe$h>VYWef4eLNUTbVKOLeQ|+_XT4%fGNTOo6kpur|;<@j*C#LCAOdojiU;`m-XL z;+9(nMYt@BJJH*?FB*2s@Or24@gz4`4NFModfkMjVe-`u!_6^S{{YRSr>tz$fOk#@ zW7SI}u6D;UN|qHZ8%YNTE<}n(xMGgx@Py2|ej6Qkbk1KCw4QiQn#Upao3o)I-wn5(KN@!-EWNN-;XxJiuSRb5L~}!)_5`uvVFzrUpQfGk)!1 zvec$GsQ?>I7bCmBo7#ftQU=k+ZMNiUZQ#!4voVcxIgWt$9|wR)IuL51>vG}N!9td& zqT+BxS?hfn7`mhPoh_DB@>y9X#HO^t15c%HmFwnD40a(1OCA=Fkkgrics0^v8^IK~ zQ5Dbh&fuyOqRx-uekef9zUinmt%hlfs_oI@pC;ld>{t+r0-4FIyJ#J}2aq-{(uSr! zGFz)+gpY72*D4kwNMUZQy!8f-j4O zVoRgW0hVA(@3T~kpk8u89Pr1IwMi#HB7ZsTA_U&0ekRM4*vkL0iqb}BICfW zsS%D6?mG*+`6FHh*l73+J~7<@`b20IQ9@`qDlK#gOdjL(OMVOb;P`z`-+|LnM?XRs zWkXy2`9q9gF+lgk2~2@oa1R67*{p?tVHI0$C&Zylb67LAXEgHIoX`w|x>?eO6)hEB z2CCA9q+;U5hYSPXw;(qYR_7UAC1~rBllRP2;O&#EUIL-=2Dvvx&zIMzN^IWQm(_J9 zf``_Qo`;0G!N*Xl$W2*+5xwIOSZR~X4A7==$Aj6Z-5Ze8?AW&N?cO~acSnaCIkZ_T zaPPQ#&1j@^o{IpuQyngF^={55{{V`)y6jYvOwg#srY^)m+DAg=|NxI&HC{XK)k#3TNF?IN* zztV)a2wC7Jh&EQXku23AJ@0Ol6E9?A}e zI7d&lgGKu2rc=-z*opBio6B5LB=Ed0}CpSww*qcX6F{Pw6pFBk!p&1HrXz&(_jDWpZFMczEo;+JPv6`DB-o z=d@59Ynygq zGa+BmUeh$eHeIIO49o%7NIf0d!R;Av z@Eb2iff0ILp_R6FD3L78Q#msBxv(cQG*FJ9nmF7>L<}t7e~l}L<-0^oEZ9Z47`rf$ z-aL=%h#eE5o&r*Eh&vaOkSqM9_gV#Q23exu^ujDi!sLeKa}$g@R+MeXqY2kM2by^! 
z=E4qb1!(V594!G5-7;a0>E!(M+59yv)fx$Cau#B;jy9V1hFX>1cQEWJ>2ZS8lj+y( zs;aK7Xw6Wm-0oO~$fWF0b=NlqkCp4yCy54L|;9mKfJvKH;>U=#pxA84^nW-b`{Nu72wZzEeR?m8l9Fp>&a zpF#%~1Nc?P-t>WUGc@YV6m^bJ)6U7mv~0FljAo`TFon|d$CZs|F+N!ywG&?%Q5;wHk?i3?IZS{vpt%pM4!N;*c_Fy-_ga&>3g@#m)k3NZN znWBa)!IbL(4cOgDe1U365SSGaz5snuW_~A{OXnMAs=c419|7Q5+a(gJw0JPSj5}R- zCLblG0{0nD#2h|EHCnNbmTMA2ENyY)8f=b%TxxKG9J+>TlyO(CEeRV)ET%v@w$CT` z@urHBcPzSnwd`sRpuFroF)EvWFB(q6ttdOBc*ef3%%?1h>C*dqG}|2+C^ha*`q!?= zavmH09T7ol6h{{#2ycAw$rZsO{{Rn6kRQ;i$USbp@ZS#j!`53uF%(dby103|JOD3J zkro4x7%F|f+|%Xe4buaVVd=}zHxXyWKj)J-yxVtmV*&&0(1smXzN?V-B>-!VXRgy} zIv6_#+uO%vJ7x$1VmXZ>ML7F2S<3UtsE1xP;-BRe!++#>=N{p>Gw1|HrKCpdE+)uQ zx`aPTH@~dPl9u7|vPn8vH~&H$>$MbCIS7$PxHMU`~H>NJ|u# zKgkU95*NJSfhGmJ>{fG>m1E~VMxB8;?<3iOW0)J&_tNA8x6=Dz%NU3QPg95=I#xul zSLY8x~F{{U;0oj?Ux)5MDoWq@KsCah9}7)%8DwOxVMJ48R!(*sooTF%~N zWxL|N6?WOO{b2(1C>5k%E=n7;rwI(_-Y43eK4K9lUp)XuF(>!Befhdd&>XY4ev#n= zd?w(-P8gXIck_G+gTF=MByh22Et!cSZe^mce6sM9&G{#Ms0>pkL7RJJ{r|fJ@4w#cHv0__g&6=T;#eND#osC!?P&5-sp@t?;UDT&{PeG6egXcN} z4}t(=*$^$2!8?8|=tb+*a+U<7l^n8!veNZwI z1W}(QS1Wq7Sr7HzAEJ^!&RXAhcDpAiQG>`I2LvAILSMqn)xsdvbJBl zY6|eD{u;v*@CmUTK98M9S&j;$X((JK4~)s~IUTkh9%YMQ-O!^nxWdRtA}qG!&d8W8 z$X=}Ae^&y+K*N%^oA-OPq`%&!%87#dT7LCpOtz)cy=7F3)NwOj!^^%BXj%mOv{uKS zE#IqpUpe4g#pAbFcxRi1Nbr%GV-18y2C(kPuy+XkWqode>V&D*01m!bUI?J02L!uQOZUu0C}5pxvVU|wN?U@LRF>J!s(yh(gyWmi*skcV zD>vaN0uJ2aGaa?2v=fuFMH(ACIIUfsW+)40lA4c#KcXCcnv_D#wY9m*d zMuLjdyK&D>qngIvS2KNLfNzzTx9h+8M1J6!DFimU3cRcVgQ|&Mo}{tTI0|>$k-*d% z{)B(Ejm@D~ae;v?5olVSIJp&?0v&VA=*~cn_Awj9J+SzXGm?19z}|kwxwxih2S2xK zM-%PS8A13wW)qc}^`bLa_`4jKe&oK>bcxoe9x)@Gvt_*Fzx44v8#TP!d_(8{s>k=@aYjB@SyOQJ+98@A<$&obWYy1BI3hOY8G~KeXSZ={D z+#J);@DgwG!0G08_TY&KyY*YqGo+dt+5unQ`RS9;Bq9|aCmT4%VPUx@a_=|ODL?Rp zXAX6^pYtXNq{!qWJmmLz-9Yb?>z@XZltY1tP_h}3CksM*0BF9EDqv0(nUV_FLWD`U z8;v+9R_d1A(s2+LcwAra7TeQL?J&kdn4!}b08?*WxRreI4Jk-zLtOt zW)B?Mg~MbHcx2KzZa>~pniCId7*UaiS>w47lbTp^CT~iDl9&7-ZL3QDh?NH)Ks|;c 
zht`MnO3X?%m0fsfU#jsYxpSM<;k(g#NdrHFtsglE?+Ye6NvmoZ$N=i%{{VFI~5prE#UtEsNQ#z`Hv9h5*o)FdJ#+}8}Yfsx)urp z3cCBEV>2kwsEqBj$vaS%@DiQ4m_I>Y6+C}9+q$j=RtTU)ed~&vngKXh@*=DRG){#L zsxJMEf}fO+ra~c4?fCiFdZ3^s0ycDI!p4MZb0tX)y7D+Q2jO-rMhNC*9uJbb;{h>v zHmRM`QXYp+-bq=P5O+b*TyvB`kF5T_<@c^NLz+&rQzaV|M7s)&=dJ1s*mD(w@iIQe zi5BRG8b2(I^OD0w`9SvK^t4!LyBGX~j#u&=C`g>Kt7#vVLcy$E-9Igpy*Vm)v#*1z-%^&y!cB!8(jZAUPUc{}$FHbE z!=vK3ypjp>4uco2?E$w`WYqHemjuT*tVWEA!8+)e3G#`C$A9c{CGb8I!t96xUTv^T zdREb>DbXAGsjt}2*^p4_WM8DZNi3M7s2?(Sqi(~O)URQxU_E#6Y04_EfBE}gZ?a3C1{ zVnrs!8d1vTVfU;lbiGSiG;9goI~m01(X;|Prz=((*;kVP03Hyjl=OpseJtD{s8=PN*G7Wz^&w^0E6^Gl9Bw!FnsNS)M znyTQ^l1TV;7IzDzn$PSmG1GJ=M%foSpROeBf`O16iL{pK_P$whIvIB1Ostvb= z>_mk0pc~5Rfnzoeb$Q^68Lb5eSTZ&unyz7e46IJTo)vP<>6j|@228i3ZAnh=NL;vi z@j>(pMvbV+UkDyj1%4i+ zqlPUiY=;2^{Btmi(MjzqXH2wY-Y5a!l#eWRh|r|UDb93wJgyC#e7_i@)Z0fNLNT$s zW-3*-+-2fYo=5@~pl42J4%PfbL7;0T>n~Surou81@+i}0)Jd6qntj^M^KHzsJ^%@vZ(TUg^CiiEhy@rWgAE%55_MVK=^0rzVgv$Df%T>C z`IZ^&&W6ejlu>jvW&=plh<=Ye?K`RWlVTzAQ+18xru2M3f%X8T#Rks-uP}V5 z>-}!eIuft1g-Lqjh-k6QrWHu4-W8f-WN;@EOTYlHkH980< zZd~x~p5HgFS6OTq6Q8il(*c_I0anqrh%RkpHMd0he^^`|g(T#~Z4YE}k7wS z;2$1XIi5}6S9wHlBQ&2Uy*7xg@Ou&*j>XBovF@}bQ$>hetrdASqWi=XQW%F>u%Iyl z(4l^eE;)lRi)G_I%?v`#p8adl9bA=P3QpiW?nQeC)xnHtK4`0xPm6RFkkflCSF+)e zGzElthuUg~IC^|rOB6HPqZ+~Ley5cVm zMe(_e3`fsdY+Ul339mKJUg>XH3#ut$n2Q1C!;0ZMif0#!wltLPvqdfUw7$k-5yftZ zHct5q+sEv|n@K^L3-sm%hg~FWII<#njWWqGn>=X5#!T0jFtfJj<5qg~a(>2KWRTT? 
zPVUCeYQA59-j;FY!yMz*E>r*}fYqI+U&g3)%r&uNG?14}J0FY6!7{;;<`kf93MBdl2BLeI`9mMi3?Du&X#@(vr6%dJ#(FB+@>sI^7Efp_cFR6T5p_-fmOlxs7 zmLnp;aj(P}>d(U}Z3$D70#4pW!vIAc6{fN|YMV^;I}Ui;>idvGC<;FY1Jwvjv+^l^ zp-m!-OIS9bX9cX8$?pP3su3*(%|hyergpO%SXoyc*ciT2Io1HgL~3drXsqn(GEO|8 zc&R{lDvbU_L(Xv@!LZ2yn@gCLbKy*ziK@;}S|o)t&;Szv%WHu``fh#EL>IBK)Q+@t zCq=Qq56LHLk3r%~Z$Yt_D+du-`Ju(fh9L@m_{b1%fU`)&d!~P4$>U4o6(&o8Dm_TA zAT=Dtd9N1!t!oiSN8X{JuH+=Awk3}a_ez#$X`ws|IgQ7-L!+oDPV=pdT26FzHr-H* z^3RVpD|;Rpu>M1@=q|g|8FvW5uBf5aIq$PPZZsS$?`;J%L)d)Ewf!~$EPm;{PC;VI z>l_PG8LNMnLTrqpPBlj& z%CcY#MV8Dq4|M=3<_nxy!FP%c2(nue8jh3vi4cys{#?6vz7nhjhvR6mHnv7@3>rof zjPrI#F~2B97;VvqVt&3lUMZI$6qDme&=W5)s-l%&9rGW&pwbF;ROF+e5P; zSbXMcOHhD5^AI!xg#NZA>9)u3rAMjI2@P%f#D$lSeB4+g$+V@J(6?FLXmJmmu?1j<0wOtcaF|DAKyq_r2a2r8jji2W-bq#b%}2ZqeX&M) zlWwewQ|MiySJ*z4pGQ3%J(sPICb4;Q02L+vNa=1aX5u{ROxE0cKg`3h6zwf`J3k3r zOh9^0T)!9QFe#f!I0pMbFNtYKuZfydkC3RBMYmxH(Gwg0uVolpi3k$eH z?;kO=tx7ceD!Le|j*I6UVLt@jCvX0znxPei zxa~GW_d~ttQ{3#Jof-R*86jdp1i-Tt+P^<`G@o+;k8|awp=HZer7ES#ECrSWoCRSg zi*4)Uh2Smt`T_xf61|P=qxvbY--6N3DsetJ=>GriF2;O$4e9F&v_;!v6qCL_g?2qRoYTR=GAdrQ56;k=p!_nOGparmPC-9Ari;PM7=!$#AF^7aUta&i}97*p? 
z=z{Q2b;d64qf=+&U0rcgOO80D3D3q7QY~?wf#=~ekMmvUaVe1;$%E2su82<*~2QmVvh!?Hsnpgk|p7oe~AR(ZVU^3~&KVU+%1^rFPhU=EBWN?AR{4GDw?xHVLh8b#a;vs=g`?f zMHjttY}55-W@Z0sXhC0T0Ru`%mOcKuRukP)neVWF}1IlcGa=c2ftqiZVa%UNK zi-mc|0SNr35Q9qVO^VUMl4#p$VRAh9z=Fa#Nk~36Bw!{*Kw`))&A_}zLi(Tu53~zD-CA`fD{)L95GmSGj)w@Q%np2 zUgVER)k@_15$?GG8uKr){E-UmLc`o)WK`y^I=N}-9>DFbJ9=LCi4M;YE_#EBfsxT@ zONk}^tS=WQYORsw8tDb3DVZr5cm@cyADxtb<;?}iL8S3ag#zwjlzefju2nywpJ2)JU2`NfR0#S0pVcJuz zy!~SF=c6hrA-$plVk-bIvGd8D$mWOX9@cwxZ+MHLeXh{329;es|w z&25EQh0zv|u^oi5oISoynf<%BX&p#X*t+|_tknkZI)-%Rw(CHZ!#zh75`|^wX;_|( zr>&X!htak z4Y|80HSpQJCIkZ#!h}|o4s7SSz!?P)_X571xme$ha4f+Nk;HAWEW_5^@z8NRPi(N7 z1#>PfA{n;s2QS6hO=eBx0{{=8zdE_2aq%rh>?4dq;*T5;1%O%ncoL|$Su;%55n zIV*tiL6~;RXj$0}2pkm8q=xX&2>lG;cqS{Uc~>vhSw7Tki%ZK!^9_9d(V5Avs}s9B#t z0F!l`5A72GNM*Andm#q?@{o6N`+wAVz?Uq$V0>SfJnOvd-Y}x(!i95|HuUbZ5>|!w zy#j8*t2IB4!q5;?q!eX+!AVNEsB#RIn^ww&Wx!%ORrscks7dbyAQI3G!of~rw}~Kv zbYjJhM`iXh=L2R;+&?65ixc+CFr<59l`QKj^#(-;R~J8|RLGPJUXfwgGcpPg9+uWK z+F^*}c_OkEMYAGIFr;BaX8!;%$hv4?I!#f<(a->bkvFc_!$X+@pN%9vd*r`&&zWTb z@81Swf%4vNFy~F=ddlx}YB$QL`L1xd>3qT4+IqsKc@0^^-^|Sl;{`i@mLn;Zc{GF4 z%{OYt=d{Eq`n8jgv|Y@pB2s|rfNbS6W|v1i7NG)CaQoZu;A$A- zi}sWZ6C^uOoYL^Rql4Y*MRidMW-OiaxBeabz&FjrCu4Kj-1*0NkP*T>!Je_l1s}v< zV$Pos)2||B-CWZ9`eFF|?yEl3X7I|=mWW|TLsZ0QIjY~7hqUx|z?s1h!X8lV)?nO%gL#1rXl7{q8S@>RWG zwyUm)k#uKDO!oAxuVKy+x(5oY{{V;@GVTDMpGXuPvF3v-eqp!Q)jxqksH!v*(39O+ zU-U&3#BcTVk&cfAO<7;B7s#y});k*FE<}!(-V%%S_#UvL*2EfR8^ZH;&ZMG*0i#5M&(TP>%lim>2m3dXloPOrD`V#&-scH4%gDer)~Kfu+d&p014 z!o!}%xZ}NbFVV!RE7I%4nU@EVz&b1?r*py&77c1hrd4(kVd~tth4C4Orms$gm1zh& z6Kt`vp(Mu`350>M7G$z&(6$aNp;|~)p!+L|EFU7T71^6He`Aiiq4AK^)L={{sgnN9 za>ODPvjYH*C`m{E0Ou@a2ps0_)`mU6#1^_%@mv}xEtjt^Ty)o90vFLp^$!lrVpDxW zIOPOQ06cqr@P>xW2-TOxmx9>i+=YLcoPn!Kytb@SM}0)@8*E3$E~` z!qw(REmZtENIlyLO`H|a5mVZPwmF3UVW!wh!m;)S1AQfbBROAsPDGE;eCaVa&j7~P z70Qt%*#_%~&c2(pj1iA|OmQ`HQ2SkrpD?U`(6*=t(c!qL^IB4a|!t7DQu-qgP z5Z(0BAxs1UXpKjRL@=Y?wWbg#vk_}w?aRNV?!4!5`?Z4rA)s%}Yi$Ai0l4($jaENF zJyIw@Dn`@7P?FuIfV!04rYf^*;o(cA$nA3ld>6SZV 
zU|$xS`p+4-hy#;~z6QMYxe$2;gL4^Us7w#^($n^!gN?^$EkW?O2C;&{!2*^8PS(+s{DcP zva`}=pndSRWcf4bXaVFFy5<&H26>LTqhl|s!s-u;)DUDQTa;NX{g=KE*sn-X=1&SU z7oElKWkmSkU^&brfL)+<`eR*k?#lcgGC*K{{g!=<`NK#Hu%Ba=p!ms({pc@h7mUV+ zKPWM2_$j^#-nSlzCM|lNzT3$ z{y%*f)Cm6opp2_nF01o2qDY9t{N}WgWUGhR@Xg$=kl~?yk2p(xlVgyCYv0a46Swh3 zovEK5=L<^YhCs?v{VavJAb&NjBaKi~Vk{xhGPjL|(pcd*Jv5CvyibybN>-LkPucP< zeXuD!TMkJY%x1RgN0|wLa@dKya5Z{ABxxVb>lX6Av}FCKf5B5*s)+-oXET>uPhCd! zsISDUYi)~6I3b2X$uFEY`{4nFO5O08{9SZ_YW{|Sx~j!20OyN1c(mvSaB2{L4(WOb z`xBB%!t$A5sYJpuE25a#2<9#U5<&J>2q;NSKP~U_7|2m!P^JK$a%nh$?H0vndiSYx z30H)Fwp+(DE!5vihxdgk_u9B(vVvB=L%$Fk=s?5bM# zy?dZ?pu|T2cbv=Dy!UVwG3Ok_CIP7fxxpSxxBI~dFviQR>(gYI?jYUG+*sF74aWS~ z>p|m7X%j63F@BH%{{VKTwMG(5(bH0!k#{)=1fsM(^R*2^snW8wTQ?=T69`Qh-UW3ymNLc?wpr6%5rjXL zb){8Ra+b(@Q=fSH5bP3BZ%0ZUg9$1%;M&Da5UNHS&~&6JK!6n{@h?$s#bCrkd*`05 zoN-_Ygzmms2CnREO~Fb+E&z=Uw}5(_5s?l6qn_^Jy_in!3vTro4MdhT8cOz9F8=_| z{{Tn<(Vbv8pEPOVCKDBh*Y1*H)u_Kp}2enrq*-3=7fhU|0{ z5R$s7oz}vdYSbfKW+iZW&i!=D3+^&HbxXgO7Ee1t>`jCCI{EJT9?=I1(SVT^V$VS5 zuMgg?x)IKF*Tn|i(Ks5m#8vZwWMMRJ&Dk#F1)3NKR~o3>G#-&O(qm!o$EV zaEiA0kzcZnQPA{V+2Q!H)gI&wB5fn+=_DqoJPx1>u~pD684y}&0mZB1Tjwy3VuS3; zb(^U?&S2V=kW`#9VaSK+derQy4>l%c@~y?BTgZvdHY_Mr(m{ZQVbcWmJZ3}zm^x;v z8UlbQ^*~i*lS+uIfTaec7*6HX3?0z!L<{`#)%j_(bk|z(mS$~Q#f)~z@62!10=^3i zXTIVurmm$~5J3P|2yoBKhQty+k`BPPv4y%1Ho|`fyXZwZ+_$NLhJwtyc~5@`AMhp7 z{{X%<+e9^-(;v9zFeylKXU7@(fK172BxAKd5}G$ZK4kFv1eds%Sr*IqXg3#TpsIFH zK=od4vR-$2!TC9$mcpa^ZY)d48pP~kw#&7DabB7U+UELO!iD-V!+it`{{SsShB#9DGu+lRsyF194!#=Tj;IqAtyd**uDN z2mxPbJDh)#cv~;;qFgC!;C{IhadHa%%^1RVkSxh%2Nmv39v3ns9@~j2$ku`krR{kn z_`w*_j;7tF6!A`Z14k2DC`Cyxsx*^803@UT02H<>cLmyyGHNRoj3?xtA;&_iU@2~{8U5YVZx%zFyo#Zq5_1)i zT)Y|`MT?KS<2<*wa@{Vt-5SLeWidmHi>NyT+YPgsn;%;XK+5QyLq8$^>`G4OxiD+x}e>)po?D#|l(?3t5F_yY57IbiBak;?QW)8&nHe9ZT z%iOYt&KC2Uk@N83SvZbcDV?lxS-pXT>eyG+l}YRy>L2CY zfO8)Z?Yu1tRCTS7UgB4fxy#l>Z`NgY$yratvMy95-ZZVgP%MB8i?)m!S(e@JTbYjS zu(^%;XE~XI9g)8Lj$io6bhqb5r(@v3pw2pvACL_?JY3r)-HYql1+{Mtv_$>?4`hvWL$;(}DV 
zJ3MdX2#U#sRu=T}G$poXBEAVp6hYX1PXfl=r5WRM_VB)8Nk$4u z_vZ#>6fq*=>a++%t;a9AMy3hls$ql{WtJ>g-9D7Iqh!v_Sa6!qQ{PC{(F~DxuB2%} z8&t>w44IE6aJ8$^3P2fYiK1fPzjF9$DmgJeO-sMmfN0E8dBQcI`dL_5#QF||J~1B2 zOO6XZ@Q~N1AXKcbQsT_YvIDO@co9c8jjZ-!Ide(To_{+4vXh5DDEQ8_!m$ad25}@9 ztecv+F`hRSR9Py7z$*vn4igOl$j!@PfIDh(aKR@*Wsb1^4M@spvDOe>n)R-Hc!HW< zv??;nJT0sc2=SKzvhnLWu84-){Om_8ocwOXm5g-DEIo*PzYLl`G9x;N;}IFfPKn)5 zR3i_Ug1N(eL1EWHEu5CAyLixvZrn;iA~Bwe2xfDyyRM4L<0L1(3QXgcPYgz=QUluL z4y<&5VD^l)KJ{K|P7$w?l7OMcixb(bcou`}tZg-Mc})>G$5WsbJ;(@H_r3Srbn+`K zF}68ytVcYH4PCe=rW15;|NphxMn$gfTf*E zfq0?~X3oMe$~pHOOU-G$L=DkG2vH>tl*$HSdS(KkysMY&=g^oD3cJCQ4hbE5FeZ%mjLj;@ zQyfVp2Vm;K1aa-Lsvk!g0&9jM3$&le((ablIxdYEn_`6w<)9LQpf=AzVhkd%&tbAn zZ*k)eyuXX`q7Bq({{VzlQrCPIvqwXwE|r(V)CHpxuF;!j@iISrF^G0V2-1>8LC{J} z_k;c*|23!dqxh@g*{8j39 zEKCG+W8H&c>F1|}6M_eVrw}S@j8YkGJ_pm_$u>fp8bCajK~@jtp4p1T4+0o2 z2w~gH?eMmv|e)fsFUMg#kj&k@!F zPeBYTo~G}}_5wC&#5EYlTnV;u62OH zbcc793bS}S)9?5;ImF=K5TeqLVoepoVH{5>S9Wj1q-=T-XwH!vK4*8s2*&Of7;L{s zwaTU#GTOt+B2kQAyw9(BQ-%8<$n=eY;;qFnj9HU3@Ag*DNZYl-8YWf;-KB{2^EIf` zQzVz6Ln`yqNwfhPs`S}i4{XBcF3}X)ZVg>Fe4RzlJJh%-AOP=rw-YHmhGBBP=L2D* zi@vW|0%Us)KtwIz8_8ATApk_Y_HXDec}h@MsLs1zO%20%`o+2@<#VS^wOdwgPW4l!n-m=kw|yEY3$pSuy+ zM-z%J)uj^<)*~Z*T#bh zN$2Wl*v;Sor$P4}0l<1>jaPG210oiV;ZtI$X~xEpA!oGBoZwGghbnvH4ZQ^~FsRg) zny8%gH_cJRE)t-VM$`)=QR3BPp7W95eT-P~ieMI0)R;^>&6bk6-3Kku#_E8gk~zu3 zgOw>Oi7kGCJjxmA$O?czs2W*k;K3hJ?=my-7vHJ1CKvH5G;Jp&C) z3CULk!7Q+Q9gZ{90~glYN2(!bRb?jTUWLW{QIKc_kM73>{{Y%gYI8x4g71v1`byd{ z*f_#;g@ZALkZ7F2jzBaW!o1sY@dGuL;2cKyhc*1O!smMGm(_ubBoVf4jiQOMc{pL% zdvBOlH(?^~hif6JMG}w>?#D?ttps04(|UOOPplUFx@`cdC)YwNE}S@fQys%@AQ}dH zJjlM4)P$};%`3x>1z1ZXLFzGp@d}uxN?N6V{*utl0cbSZC-3EPHF=LMWIY+ zML<0Zh&{C+kfDe?faCY{W`&!{n*0Sa1(@U9DZzF$O@_RA2ra1rj@ve%_m1$YRN(CjE$HZ*y-=s7y&^paeXE?)*1NG8fs0 zjhc2nq|x$@H`XsH&^)nxAUP&pP)4<|w^4X>j*h&lfXEqe zhaBz!#Q289smHpZ0Jns;i>HQ~|bm>JLLpDwz zEumH07L()-&rBfEXA+@8SZI0avn6p;4lykRWwn`xaWqyKj#8JQy&m)(2N>y$_$PnN zvd3pblXf6C&ET?k#gG8DZmDZ0-n8L^Z&5-J)}vB=Uq9lezNix8jq? 
zPd~X@TD;?zrnliG@+Zh(TJoDwz`k7pM(fP?r4k}*pcu}z{Y-BJSvZ7Jjt8&`{;Zx(t^xm&WYqcPGrWj1&9 zq-EStS+~yPqEj0&4t)qC$6Ck{0P}8-)(?VH749X<1875|C?mkf zJ>5vsB;8S+j7jz4C}InpR+k0_hDb((J1!USh%->y?CBC$IOMcrRL44+J01!}G3j+< zjh?bOMhZm3WCu0)$tQnZ8ezU$0RwV;>MQ>MbkxjqIQJ&AqNqnZ zi)d@b0k}?Qkeo9Ue*Xad>SaDNp(vb5tCVE%8+fSP1K56TB0vJ|h4X6hS7ZwjzRCi% zL5jMtH$KF+{{Sr^_~InAsVkkMGiGfZj`^=qO92SDv;P1NwXXcKPcqybTt#@l5}H!g zdIKkBlG9=S(R2S>#ElikBo0)fqfyXBZKG3BJ47VEI8A-d)TT4$f0ZF#yIEOLAbpZAq|Ckl z00{k>>?YjDB4xS)jlqP3u2Z6#k3n#KT$$+kgtHow;8|ErX*gZzD`;`sk>tJ?>jc#k zmE<}jZm>$Fx82jP3f813HWUnod!iWG00oTb<%317A&`^6d6qeSDI|zamt#HARqij3 z{dax?xW$zO47r%Br-Cet`>{w?qWxd?a$tK^SnaI1fk>OM_#-fmu!pXJhU6gb$`ap^ zkUq3ybLaGRSURW?NSCiJT7%F|d9T;+q;i;iB$`~{B=UbA(sAO-Ig~*y7W@H{8V7M z9V0%Gr)Gv{%Jhy}M;;h!i@>4=>G#D>;YxZofpCZ2DN|kRIgBCiZ(oEi(>l{yFr;Eo z0ZK)0@NcaKPC)rj&!yUg(nh#Zad#=kkns!3Ycq}b7oMkI3df^0H2c z|HJ?#5di@K00RL500RI50|5a5009vp05L&PVR3ND*v7o7sQh zI6EH^bX+OxbN%eIsCV%t_D`Qvoy>_zT1Hv^clpEJRRG=j-|4^pJg@Ki(SP1X7^V^g|d8 z`Nb?FKsJUyHHG`pB!?=Yq zNaM7R6e|jMO4RSy6wlY7>|#J55F70ICu!Fh@j)R#hi6v>VAbo4AOkvRjqNNuKzzIv z-eG_SvH-U`7wRYiY66=z+@FPR`^PXC5EnP~?fyTUwbnPzW6}x$SsJKBHYnOaXyI~t0+oG#PMZQoOkKt^1KeTX z&N&65uxxLJ&YKTT9;j#|jFbI|~{qQ^9{H41rm6j;&;>iy=BU zipt8WJz+v_wRH1=RtN}5G*~(l!);XI!U(J#=vovE#_oou?$lt$HZ08Px-IAAB8Ndz zp%{fa!|;=rr#-*XvnPJ+X{4~|6G~Nf)!&pF&p;XghS&ZC>-pky2+IRNFyghN#Qips%_;@lXUc3@U@BCEm{IbcmvZRzJxCrYU7O1FR*r z_wgLRpJd|Z4>&4lXRtWNy_bQc9w>;DpxrwE01nSrNAsG`!$iONNLoOoQ`}z3zt2%Q z5=97F%(T)a>8!B(Kq6;lCrL`d&6#u}K}#e7(mFq8+pGf(0<5tI!U+AWAVg4fzZU0= zKe+L*fGsojLmUDxTl$g`H9LJ z4GF|$E-Di5?+lBJU(7Ah_w}o{h$w=AgbJTUxBmcqF0fJ>9oDqJui|nLj4c{gKy`kN zeWw6h04GQoW;bBmUisTwXaGtTV?^GM$~;i`2)jd3S{;t@$ArQoQ*>^HjiIa<4lCM$ zyV|mboMVm`gf`N^?`o%qh3|IAbzp7Q0)b+YM#Ff*@K`{=PWulBOTLVJByNhOwThU* zdaVkxPS(JGCA~d&oKZwZZYr^VpKJ`bkjkPOIf$butN0dhCaFoJ8ZF!vOl+?Vtz|%< zN4CA(KdxD}r=fuOCKhN5Zz$xT!Sak<=#5I*+hi0**f1uazsbXuQ`UU^I5>`<+0KK3 zFbVDcfh_ZQu%cw~3I@d=8%kA~0Vquo4h^sJ?w#?O4z*jOjwOpbz8o+rTds+w7rq`; z0V}XawEnQ&_lU?2GcRX@p(qX5^yh2{8`giz{{XDvBMOxTtJAXhqmuJO5$Mly+HQdE 
z_rYXufuR;bSZ-E)MWA0Kn}W`-@%e|!HjA}m6@ywY_rR+K`T(zIWAPT~PA36q3DP37 z7JJsof$9OqI!vP~OW+kaL)!-G1 z9x$(IdNtmo(SxRb8NEKCgN67!NbjU-r`*LFK}v z^t_B25h)rp)@1MgVb?MOEke;*kz$}vg9*c>)Mmn))Pyx?vbTbimZ1R6dswjquZ)VF zd#+a%urfRJA+#{nc6@z-2EO^1=?FmTWkY*EMPdAR!o%oKZ1T28_x-ZcfCLwS*~~QG8I^N?w`*|{{ZJBFvZw<3FsD^37#0BOZ5Oq zfZ1cN=5fZ*5}#ToYwQlwH~|QSoy2Xnt0ej8vf2_PT<)@IJ~W4XOkqzy>OG|D&#%o1CQf<_{1QYqvo*~-ogKtXC2`AL5TKZlD5MG>TkEM!BOfD3U=H5S*r+zGDuRIEcuVl(L;YzQevvXAS)#7NB9Yf4v2 zQ?pjw6={C|0NH)N$Z$)`y8?HVi@V$A=L}rLV8T&b(d&?p)hxo*dyj2NJG@Nbl!O4m z%K-!}ho@iy7!A_!M{2-)IRgEN1u3C+j=iNVsfi?~*-eTl@B>5;CA`@Ev(dSUN+CW)O@5v8W4M zn{$G+H6H7s_?@Futnl=O2}>wP@xE{DvW|ulq7s^7dr^lVf``1VK%g+Y((x2cwPXU* z3++37<`@WzXU@7ljLhlxftbMvX38F(U!Qj5>46rrz5Ng63%pDK)**a;S+YOPSQZ8} zsg>~QzCg2vSglSrB;UUDpS-deSQ6A|pNCJe_;3u`49Z6n*Y&{f#Tx{OOrsL_b-&*T z2uL4c)4LU9d)@4=m!oHBubVxPooZ~Az^0=oiQjwvxTdWOr$AyBf!E)hvlvNmtRH*2 z&#mD&dNM8XF3^4$nRmA{icAXh?t(9-r^@e3R%d;HRo;l|oga9sEOS^X?VS(r z{O2k8^Jho(a=d6sG!+r>hvjVExaK8&Z4Y3dO!v#fg!)D4N_4toHhyuFK_WDJ=+&d) zi$LR2#G@z(zZQ4z+ruG%SO)~xrKE%UVE}6=VK^Y5Dda48HDbtzK%lda#1g+h#=gMQ(Fe__R!xFFV(%)_g9=J3>Qm#WSFE&l+Uyt}C+ zbc?DBKIcyqf&*2Hi_%%F==j3l1yN3s3>AMw{{X%bkRAj*B6rvFouV!*5Z&JVN#xCc zsmobJB1TY<*rfLN@W|grg5JsZ`@hGJf<TK|Mf)vb7V%D$cl2@)ClS=$OpjoL(Wu z8ca-nyNYkw!mI?EV%kw?2_SlP$TUHC2TMuRhksr25QLSw`khcW?VM<+RelnQO?FxQ zm@q$c8=W-sukXW`$T0<=3JDR!TcU>=YQ`04c1t?C^~~s9flNAd9M-F{?V3mcKnK84 zE{fBTvQ`M2HE^!p#iUpwNti7hpaA)LajPIIW{YD}e?))qk>vOzt&uPenHRTFNv=Sn zT%W>ppM9d_(<8tzUn&vR3yVacBpZsESGIri#KE310^Nm4N3*=~ppHtEtD{RMg=x~y zx?xwRp$t2#YVdNVp27pG1tbaKH;0h62_R`{{1qL!cw!J56@2>dmxo&xRJsxi9)xpi zculF4&eNm->D&Re?-NsCXiZ+3r_&twlvG1gB)OEnKk>jNo^q(cvHRAKrwKm;uu^?M zI_iHu^63mu>l)f+;jG0>1<6q8Mz+gK4o zP(=x_X3RmNYlic>oPeXC3+0OY9KN!qP=p`w-*Oy@iMlpnUj7zkOD_vJ{{Ra>f60HQ zkWMi;H}tcl z?&_WUdLvTwcQC6j^N)M>@+iSpR4`=!0GH=)7hd**gvbvBCc`+y&}h_yui~Ho0B0xK zxI4su3jY9D8^yS60+2m|o-Aqv?~_i1Q>um88=j~6#i4{Io}Ie?0E1rd7$Za;$zx0a zU*hi+{*go%XF=QwnDlZ8hPnhzpICJKtH+>PIw*(lZ9Z@DPIK>xP$P&n{1Z2g>XHDg 
zMr(|<&f@XxtMzv(62@2az70%KTX64o2ex-OAZAuE^052`FPdjMWCjANzEiIH{{RWX z5F7=9)r9M3he21JGExjuuyi{=nQxMG@nP#j1akWj{NE4DysxgRcNFf z^5KiB-j7bDj)ftK=~Ii5VKp(R#2a?G*?Z(68?Zi$l-bo1hw3?b5MU_;;H(QI8mAL4 zWyxa*Q6f4g5*sF>r6;Jfd++8Pw%8EuO$bJOq%P+#RtgmpPehke&+O*Agb-4(rYqrL zch2Pl3kbCWz*0Y@An!5S0wPdQRMpPRC!xl47iytEhZvVd1PBgl20u+;h@~@2w48g! zoB#+24uROgooRy#0xy6tyR`-t&|w>*xI2L{Z*R(nC^X^{jvlAz^{QOFGGI&}h6bO` zo%V51S`-7J2HaOdG8@K<`w7Vv*Sy66P*VWQr+X|)Nd~_=*fdb)1VL0CYgWma=JPD4WxNPbt;`^SFNiZNpa%XO>y zc!o3r-k4jGI{yIb@PHVv!BP{OS_MK755R?^^MaYA2-p!t zs)TgMfPku9cI?uf{r>`t{C@Jcf*E1DSGha%{{T!KfI~?C0G|tY>3Fg! z?sTwZ5?Yt>ylf#gO(10T^+%>hh+sw`v`lY%HIuu^9vH+8&d%CD#X4dYRh1VFdb|1G zL4BwQGPxos?7a_mDSjXe11!=4!Z951&76ApLTTcly`?hM!(z=*1$rljl65b_ap?q3 z0~U{u(0BfDtbsEJ0VTaVs9`u|i(W7Rq2HZ8Jjg_8M|CQwpjd~=fDHi&rCTi?i|PLW ze6UEgbq=L4{D)rfvgl(SCvW`MsXja@-?K8s5&2(Uj`M}lXc(vT9pxXH=O_tam>tFV zJ$%2jngjvA3TDQCJFuI_IKLbt&{03z+r_9*1ZGt~*S~4QT4;m4g?E~ltLHVB%sM?g z-Hbovr0<-LRJ0%a+ZF{)Ivt$QphOuRxY~>x;M{)othU=ad9Kb0e>jnpzzR6U0 ziq|L4{{H~uh!0a5iQU zZ>s&sQCWeYt;K&1@K*U}7>M1xclZz^owI#WYLFhux_s-+!~5aVVG%{7_@*@{-{?2g zS>B+3Y%plO8_V-X0PZGxqdRT6-wiNemMbgF5&8GxT0A8;P@g+b>hTnyIWj4CqxU** zTnkiFu=>)ijQCnp6Dpw8*c$oae#Do9Unsrl77cb`=JK(y(1wcAfef<Y z59f0rUQH6Kza=315`8-Fm@Jn;fr7z%6?68y33ahiDXfqCAI?}QwTbF11f<71=;cgG7K*T1U*WwSa1?=( zoiS?l+_UEQ#_Ax9)|L3{?`H`GQ+z_xV0~bzA37kx)(!;ICrpV~0p(JYE2O#SLGss_ z4tp7o%Jn~o`ICV9kdeLip!`Z>wj2uNkW>f_nHY7aUCuZzR2ila6u88grPVc~`qHV8 z*RnAbl98%E1izvA$*P$i_Rr${`<8IL<%M#QNNLOEkDC>u6MI)beD8=ajmD9C_}<_j zxj8~mn&YgW;(fcGaaaPa@DBdT{LDThNRRi^=imN#@&QTN3u!V#546*j1vdH~KKB=+ zqj(O;Yj(wde4##l<7S9x$O4Vtlftv_D;s3HOJ(yO!QVQN+Qs$*q8PXd4YL{y#2#5% zhU$l1NlEFeAStC{5zu!n?}!;9m!hlF6_5M(ji7d?YJ8m^&h_U80uZPh=za!2Z7&6e zSWye4pS1q~?)k3Up;!w>E575^dQlK2AX?|H{{ZGaJVUEVMp9^ahh5*Gb8~bp%HWyX zEB^pJycA;El1}FbYu8h*6Jp?~4@G*br$3e{OmksT#BQSMEkTN0Q=-zin{?{qKX~AW zK|(C72hBgVyihV>1w%C*mDGXxag*XvWRjSdEi}T%LH!3oe!vE&F(I}PfGg++y~;HK zpi#jR1<=MFg{TtE2dN8V{D1s1MR)1{07!Y8=PR%wT4uOU`=A@wB!3ktlm3qH^Zh4u 
zts@j9ei0xj_;H@mmAALnhw`u9OdupaMdHtc-`URsBDO#zffXZf@FBp3qFo-dH#~a# z^x-%vggB?lwXojd&wMaeVSiqC*XJXrd7y^V^2QRE`hA2T8Ug^Dk7Yk<@@NJQmBTM_ zZ@u-%fi!G7pm%@AXI;Gs0vpgWTz%ipxKN-w%4Kx(Oqn?0X%v)0I_^84pG3GdVN+rv zHzoW?tI2c%K`=%2jJN%GJzEYRKQPzr{I{48^fBG25+$bRXAq!>Suhk&a+aYnZBDQiJRS^;~4b$4fJlG{g}>;D7g?V+Et=Gtsm|@*hm{)mb0@n z!=_NhS1FHx>MKAcpC))GQYToewK70DoDvvC0aO6N_^bG?UT9<$05=Ij>Ux8$W*PJ# z2&4v43W@e5j2P)f*d!2WCyo!{#f63N&=)&d@ArMeXyVzay-LRTF4jDkFtki9u_Rvr zx|fE;uqry#)i7@hmI`zetvj4S90CB*i!?Q_Yx?ofB0&m= z0yruj>r-|~R6l|6h^hP4mH@JdR7yks=dXG?GZgQjHCRPg2B$;^mwhZvaxn^o}A1#=*j6i^RtefmR^8 zYYB6q5fI^2fQhK1Z}u{mh6PfPCe%hbF`w-oZlL1Ub)Vi_mUNrJD+B~>vmChp0H<$U zY%m~!YOAY3(gbV{Jrw9_qgtK4neYG#bOeKdjtAT(b~L_Fd6I`}GwC)N*5)xJ&>09R z)yg(~Uy;FuApuY=bxIO!c=@8Cz94R&+Ih#gFchItTi#SXrMbosg*qVQEQ33$K273L zQpzP1e_X>T!}tRK05fFej{uR{(eejN-$rjS#iUb0jT?Z(>7=h5mBJXf zR5gPMo!5{$3M<|`jGOxC>y6t~VPo4K!TcCArBgEqwczej!*{iaYaJBS)Rcvzk152!$~fXe2u{W}0F2?8`lL{Bu| z6B^<-3Y1!cw<>isx4V51JjGQRo$01mk6ZjeLMNoeeO zI>uc@YLr1(AJW5oWF(bZ0uZW;uqm;;A3*fbR@!vFf$e(Z)zb_WF?xSz=aFUX30+4} ziAG+?NDbnIYs874i)!{qE#(AV7DTe9x;Z}AzR}GDR6FS;d>c>qn23Z^D7UN){{V+v z+o%~yfSqFRWehGB2of+TyV_Md37w%RK!F3AQ{Ddn@GWZ$6si^Lw17CJG^NN1(AN;X z8V!GfX)H^0fhx1?n|+elhSz{^`P6#NX6snIfKrwq{#Mlf&l+Ixqh zNWY~QQc`+Jvd&!)Qp~`xemrm9o+w_DjTKP<$S-|7b^r?&K>~J^YdSCI3;XPV6j{)h zpt}N`86p!(kua%t8Bvp&D6$b2o9^EJ{$3L7A4Z5LQ^rp_PEU&*Yarcv?CbB~nQ$Jk z8vN4k-ltSi+J)xmF~_Kp@2%$8?gsw=DgK)G$%Oe$PNgD(_TN9A5Q!B;0-57*#C3aK zC87e0bk+D(Z(V0ShQg%(4v zg8HDkLTXRqW^*FR@1=*Xc0+r&n0ydf1gy3%lTkh1KotO0xQAODvlefhc7TPW0-#a< z0G|GFxSSS`$yQ#T`mExi4OkTO#-H?$7`0bQ$GUg_036CC!w7-gW+&9|nr7%SmER+M z@puS!fY68?EsTgE?4}H+u=1fAcMQ!q18R&*Lt^&o&71Eb0~Q01e)uf${{X&dS`iCx zRx)d`znwfMY9_i-H!!{S%64x7P#DP*2vfiR0EdSHl7X#H(nO_2LE06N_7J6!4~$1X z=L7%+2{FwNa9^L+;Cibpr2*J>#?8I(bm9IHYS$4#Q6L43n;LaA2MsLViQW^WTGBfR z>UFt)DDg``#-dGv0dOevJIgf5K{7rFu*4-!T8OX-r0U-`eVfB};0RXjrXB<3Zwo<* zKLLGZb`b4*Olb+9s; z#*t8&OnWQxX@(0odi#gP;c z*968rBSKIa`7WSf{{ZfH_nI=Xs6(q#^uZ4MoRukXT_}n80=v>ruwBwv!lhQ8;Qs)8 
z3SkH@fl6sbAEyq=2oJ-KTz{X>1eFB=1w_3)KZz^3_5z9iuk-%^j!)E>FtNTBk9(v( zC_N(kVP=0VOz+MEVik2Yj;=pkVc>B7O{Qs3NZ>t-$0(Z>R3A^z-re()I}b_FGyD(Z zug*?-#4~B%r4Q5p01FUUvuB|J=yA@Y-KB_t8WntWeg3tac>(HluiCD6^NT2gr@tYc z)cl#tydY+)T0;(#sQjzVaT|yp9)HRFjO^mI=wI~x>Bukf=NcWwGv`O+=llMhXs)_R z6Zmr*v#wwOLdjTJhG8G(PPhhObN~SBaH*|*ob7rU07`1~RdfBbo3@~(mecB}c@OV| z(K;kD*X=UB{{S3|3V%R(?cDxKaW;Tq(FE=9LH_{3(aHn_y|qDp6py3&9NeZ*Lqhk_ z**pIL-vCfF1O~C!(i;B&&(-MqT@&xx-un#TQ6yBsS0R`rkInl=uzrA$<9u&lTieHN zG=Xae*RHzPTofx7u%RIxwUh78_(TDn9>y+yF1fd^xO0I79dyMv2|Vxk-RU8GrBNoC zB=iS^0uV>8i?3h(4@>|Ac4EbQtsqdzo!6<4`wL2C?BM`J zi0?t2b<^JS%g)i78#?Ld{{Xp+;9aQ@RDkJNuYZktI1MIk4DnHze~(@8h>|lJPw>D` zy3QZ65c`rDs^dbB&}hg<*bpd6AB_2V**f}=56X;CSGRj&IB3mB%l`oG`KlWT20&@9 z*u7rfwo|4jVv9`q4f>ni5D*kS+&_`gDfy2XAjqwd4BHU4Ak!+`>=gr)+X`uCsT z>lsJJVZI)!w_g7MQsW4})f<=ov8VD_NgfvwvsU82d8eiyLXnYu3_Ey$_`R4LXwJ(! z-rkYuUb(7vy8 zc>O8V>^?KvEN^pz=l}t?Jx?p~^MjnLOA6W)N(pl3tP-FDy&a+0PvrN5!3>qvQ9wt< zu+5#6)}kn&cq`^)-WQF)m?8xrS(*I@8B{_qVkKjIAJ(St9RN$n5~1q$Lr3h2i6LpI zD^S)Kuck@Nqn?iob%~FAy>B-F1yDIiY_%zK{&8(HtB)7%IF2}pEQQ*goCwG}$_{aWXz7+JQik~G|ReK;1!H=)h@nDB|zl)XkX ztP`d`eR!(SHI%8_Hf#Ia=0^e*dL0bYf9HxO;6+~7dNiIc**JkzR8uK!>Wq=6vUa8} zKnxVZxE|_M8^RdSPr{bW!ICdr8do{nRRQgZo~yjrDOxZ_h$+@6ukqgatTqUeiR0so z-b?sI6m3LWu|i~#?3&~hHA0H19VhmK!Ii`_gaBp)0ug(I!iW&KH?Z&iUS}3T4@)So zY6}}U5dlwr{>}X+afDL_I;pV)5hj_5cohVgW2fa)ByOB>lQa|(O)@2_F7^i?pwR-W zLSI$hjXU?5fF)2rCZ|rmA7Ua%V3nq-Hb6k6oZ%l@0g9DHT}A^9N2F#Ns}r8e?TVGH z$BxgOFJEn3<)gpyY1is&1%`>9to1Otcr3NW)4ToRB0n$VU=!i1o=_-3r zcMBAPAsP&!e6rpm%x7qc1{p(dfTi9N9XHaNA7H|D{QJg}Wk2QVdj6C6*ujD_4$MS} zMY)%Y_)=IbCFuiHW%!&I%La3-|f=OAnITY+-{)Nvv?(_pr()Adu^v)NOLF@ z1Z|I}bw7wB+sNq*2g_?CRr%L{7E}tO8Ix&)^P|hc4^z@b7P`ME{(iZnsXBrsx`?!5 zl+A2tL8GLdpTM=Zx0bq4q_#fpj|h)6`t1JzKaQ{a%VtMu1q5gb1e;K%MnW~&6R&Qu z`G2uKPKinLPmyqPR6qxVu{|O8QI^N-!1AXQQt1pWcNSb0R}nSvsg|k#sz@}ExIFn_x&ydgBmIY6_G)-dm|uHkA>_mZc}x$hSK^xisO$?mtqyyPaBdN4>+kAZ$1Fk-D0F|&%FglZ;zDX0*=oB@ z9QbgWL0zRGMf1bCY+fp81UgvSP_Y*IygS8_091p!S4wL8%L_rVrn(@V)hgL|CW$2! 
zp5ThKmXgT?6TDZlA-G7aSJUjxHbm_iZ_cWF9%{f6T@XjaQm|gsUPB3tqLv!FpRdP9 zk@Xx!CwKLoPpR3+SX=~x6cs*zEMA-?5QU{GWWSdLG~@+?7Baq4hIdfB9VM^?*c^lS z=VhHdXx3ecn$gMMMUPnU2yrk#2U>xSJ3q!oib-%3wMz~E0EWdq17!z9`)CK>4YPvm zAyC5fvV7W|?H&W$2oZy|bgY=|eip*#0iqFF^@$!mX`vdpzV+TdQ)fFE5K9s)3T;#T zla)nQy%zR;FWz_tqz~QK-#6YglW_!5gi~>Szd3ftPYpz6ApuDA1pR6`=8mn!{{Wo4 zv2f}jSf#t!;OF3syC8<5(+pXoQ2SWy|nQD zEWd^6Qp06Y1_BLIQ^onEVmx{cbFpt;BOK*NdsYI2Av;1v{{Y7W#pw@Sr7)=oRffkg zmW638&-J0Cf35;XA@Ty{6RY1}8gXi(q-2KMNCXhqdpSaSc+?(TR{idG(l3nsBpHEJ zZOoYW!(oWQ4A2u(w~@AziE)UeJsr;e=kF1u0LWFb=t^u1Zg{uF>J7_;ni!FR42 zxhWE-=|JLx-{%;G9Xc`JzbQEI0ILLIFJA?=6zx%U^2&Bm$(NZKEKa5^NQ86eG1i%Qt zs)q4Gxh3^m=4bw1rvwK`V|!ZnzrQ{3#Seh!pK|{I_wm9&h>wj=-OjxJ@XHtt&dyl< zzAN)2<4#0M;e+?7h}Bf%LQip)A-rsSE!A)fD6ib%WP`h55m-ibQVu zYt-yJJ>nzmR68M1$f4Kp;9=uI0cmwb3bC2^sEH91rVhlh(#Cbklb{3wDlI^Ddol{Z z&;lx&K_6OPvt=a7Dx$!sB2;lYOy(I)ke6nU=z|EjAQEUN5}hDTsQ&;dnF5u7W=d1| z{P-pF+)X(v&bh3*!whPNiX12lC_x|qdn63hbwO$!LLg?K5`!=u)^Ly?Wa|7 zMQd78it{22(^#tBq`Jrxq}U}+)M+mhO2UL&NG-Plv%g!+Q7jo0+@`NWK1 zr(&Hdot(0%f+mO zT`g`28z=W?yq6UtE{@IoomVn(fHg!H6q)l|^Z9r{%1dD&cfFEN0hgEC0U?XyEG6!L zTmw&F>=9yh3%N`oC#q&yZ->;O=ZA_$=@ zI5n*zfLW(P3w$UEL`S00=>P>w3$p@koAZU$q6T*4Jgjsdq zCxto}^QRli5;nAVKt;+n-RQh_Gc3C$J&KJR^JCpqOTC~B0+NC#dwXv($^mF6q7@wR z^LLdEAhSgL$~bp)*A|_Gcm*!YwSd^Ie-Cl#j)Q{J`>Cz`!b-WB1JKCL%l46b>h9fQ=X$Dc!Js z))$%}3)a?6*cT}idb}Ko!o`BQSml%d02XL90H7o^hy>`9_$Wa(RMV#qSNa!|kb`f3 z)}KH5trX3UW9Ch=8!IJd8;D&U=xK!;s=b#&zv3YY_Z=s%yHatx1C0lk68_Gsce|#+UT`ue;}5TusglE{iLjXGnzQ}l^n(x-h-=a-L5Wb3lw5%@0eEGQ8a zG`Mx3?f&mD{5Si4jnrB-*GB>(xoTd6!U~sW?-D>opEW9&L=N`<0LU>dT))dGAwFBv zLbi!3+lBryf3JGt5C!_BK6^=i{{X=pK|ps?U8333j7{JoKnoAKDyRU4=?k284ZpW` zceVb!$BH;Y&8Eyd-cvvI!a!ERGh0ja>6rD&#Wb)=GvfIl{=;@%uvj#m_dlDD8}6S( ztXM(J8WDT%QZr)jAXaTUtQIY*ssuso-KwIox1y-3y*j9x)v8*hc8%KWb8^0~bMg;7 zKO}iQ_kCa2^}eWcDNHZ@DB6aWa84VjuqU%CpQ4J3EKRBA7T4-_^Erzx-m84SwY9ct zmK0dSMnWi&WJ$*W*sm(wG@45J&@&Z@tc{g)@vYZ9zr;L8=Oj3+3in`DqPB(eRepQ)!m?ZdvdBjrswY+8#82zIq^JIzt 
zAA?%NhgS^0GnHqw3U5AgPTU%y#akS3Uv@s4x+>1jRl1FE-+_d-rIkU(#X)0?AbIgR9B_J{S64UjMue}ywZXygQ?nk?4F0iylvIJV zmG=BsojA4rvCpYC3alH`k=~p2qhOcL!TY0%k=00sNWL@khus+a6B&^&@RRNzEQv7- zRDY*wpJ(SlJF`F2T^Sx`CX>MFIY%n>x0>VJhEwTXaG+kDv!re8B-98HI%09WEH%RW z7tzvfi8pWC8nug1Jw`cpPw)Q++}YK@WWgm6#Tw}aWFPp#sHvh{nJpBK$A*L=J+t(* zon?$WF!50^fooP~7k?jj=ZQ2X)Q>TveFDpnPG&Y`^<{5=YKsJ<)ese$J;@rVRTp8h z<|3QidcDCs_>h6Tizw{qooM5E@zG~QqCs!6{E!RVHQe=^mY0y%<}UPad0AK`G}(mz z&5M-Ua}(Foeo6T*M&`oi@6E62ejO3JiH#b6Z(G9-7uJ`OZ@6Y31jBJ`hw+y5)Vc{0 zMJD}iS(fmv3Gi>CqP*Wv^UQngpta&0*HzcpbEt-X){vbCK81 zEj1k9KZHki@=CuZCx1%0`s;07+mLc^2G7VhyKr4zO@bCvhCHAc;c~J<=ld-FCN^Sq zr*`3iHt~*}W)JtF!wuUf;2g_&oquA#`$Yf>ZrTn(pPw}2z1#(Z2$PMMp)AMoAJ1A4 zDjMgN>*`x_6^eyI&*)V!zt*>@?(?w=m<9_t%=2eUZDVH1Zz|Z1==m5}G7kkGobAj( ztQ-pJo#VK6-tAnUxwfxg`-IMhJwHHSYn8Df{#p)J$=Re-CQw%pZx6DS22=1xWnh0z ziB4}X7bPH|HDdt<=T|L1itywtUUG@Am(d2d0My7A!l~wPlbj6m@ZE=v{{crgiz&z) z=C{u$ewk5=P_bX97$gzTT!fi`-Y(Kf`p5R7O^LyU3|rSw-xC;=Xx zI=rbYFSRZ}Y6#gL?BsIyu&@J#3Aw&Jwd}#ZS@FB_pZ_%Yn2%c$f^}-xxiZ!fRF@Kc z)NJ0Wd1KA^-5S%&v+S=k8}dfRhIkd6QqA9hZxA3sPby_07D8FH_riyvDiWLfEY;j; zuO8KK&Xc;Lw|bYHRZpXq&dUE^VH_mJHh*X)UdHF60dJ7Q!7ATBmR|(Gy`e|aAx~BBw#^asWROJI)}rvO@}&i-ZNp#~+T0?Swedoqz-cy`1UjP2G@FE!6!bsD>ks6hpOK437v8`j!O zK$YGHxMOP67VmmZTd0km`9d7ddVmvWF1j$HC2$07| zIj6@SyGX|BXoBS}X1i5i{92(xFxOT62UPzWT{yR-!8~FV%Rkui3RuKZh}FGPO_2Z*3DBIx+tqvLb3D~5aXE<2Ag=Q~o};uFX-q#?pMB@BSt@$4Erg$An&OHq8DtlT99ln!KocB(t-- zAcCc_K8${`Ge>E}By4j;Z&&*DyO9_lWg&$8ixZtfyFr}MW{E@noUW24MnQJeiSI-Zo6X{$h z$vxp=nomXw;lEjQ-m7VZshxCDbyK*Hvi4s-3LyV6znMiEUH`VvNQUObeznHf5}DVx zC+Qf@NEPP4mPnn78-bdaci|u{qc=vRkg6edeRMP#jrk6e`_W|p1s)6cmov}*q)(tZ zHDG+zjc**vR8FR-EYur6QqY8^LV!1YqZe!GBVcwp`r8rxN>?c-zfz*BN>~?S+0OSK zwxYj@>P7z$(b%(aDSNZ>s_SsTdi|B=RKX{W<4@JpMMIb;*WZ`9EGt8`x_`n3lWY6t zkglImKJ+c`{tk2RW;t0V^6RFiEG-0ZUe~jZgNjRzId2t@LlX-@2)<(s3<$v^3>ZN; zSwEaZsddxVc=V~n6)abpOa&-{4 z+hU}cu`EUx6`B#(6Kt0qFD{rT@5k%+zqas_Et3*lbSv?2e+aHQC3X0@j-t9%D~?Sb zXYy4-3H5&nVU3T36F}U{R&+URL1wI7V`XCb$M(- 
zpU-oZu@dp`<4}-ZLz~g!PQ&gZ88yth5XI79zPi1@F2TtDjgYGnF?SNGYxOym>Dmt` znZO$o@jh0+Ele*Lb-u)UP#zdhb(EQ@XCteq*EEgX?&ZJpP1vWtoSOU>Hf04+KopvE zRp;8P4aBU@NIa9IiT*t>^Nr2A&UxX7u`q>q_RLa4>0gDDxuT*ZDxSB4-nQ=Ms~bcw ztR+)~wW*D=NJcuyKf?s$d4ucaXB5&l5QOb6)F_9H6 zbXB+~oYB~s$mg4KEBf=zZ@(E?5E<$AI5<1bywY@)1>!k`T!pn6xbqAcvCFRd{tCfT zw0RVFM)WGnJY|bW863tcSk39BVu#ihkH7;@7?rI<(3R4!wcIhy5A!X1-YuT|WMO69 z+(+_wSOUBxsTI{z3|!Y{SP?3VRTD$ zL+=C};c(*)Xj6wfq=4Tsu2`(H(T>J2ZD&k(%a2ZWrU#PV)%}4jqr%$IKQi3s*j1X0 zEbx;GiSbRTmrd?wv;5b_2gkj-ePJpHGvaw6+9A&~XQ5!j=B}$b62Yf9tfiRw=Mit$ z=llNkcb(GR3o>NW?|=I6lVUx(m&HOSI#?%BMC>~C)9UXY@8iI?i_$i{zgJ?^G#Z{# z=;2)xSc^`DPoHM7J0Nh|TJ-5vfgt>rjrC|atw!sRcEZp1c%J~3<8{cewG+qqi=Xos zw((g>g>=s$hS4SwTM0l*__U~j@we51sy3PVuuzf!J^gPm!HQF*1iYdVLsr2U+i3!Q z+;LF4fN<3BHo0_ zq52B{26J->F%ausDK(CMeWje?jiWNglKQ(1*o)(?)qMra$3Nlzx&Al}a3x;^zEjQMhvfd2i)}_){eLT(&(xoh!7)+HD_0ASmY6h6%3Y+`|S&Gt-X(52MZLLyTvh^s5%BWK>o@ z2BX5g+8@qWyIB>1&t9xm@BZF7cW)&sYi;BcU>*LgI#5+Ed3Rb;=h~jzP5x%Di{7B= zUuFtX-*SWKXl8phPsT6Ml!dH}n2B(nfH0bY@u^wJqme6*<9-TZbX4!y-KYP$X_fS9 zjfVKM<2!Z@`2!LF5hM2-Dk{`N0GGw6&9E4nsxvsr+Af-W(vyn6>N!iwE-Twrs6c>I zFi)?0P6wqqA&~h{9`CXK1^0@&cWw? 
z8H6NE$6=Ayw9oQ8>oKcjHo3<6nqdi(Rv|oDAuQ}MxGD{O!sdYpxnSD zX)|AdiqFhV^bfupvuzDV94blNvf#2n_~p8Oj#G985z=;p&_^Y>9zoPUx6F``urR)NYHjNTS8<K(+ z$3G)YUp@njv8qs9yQS5T7c1sX*t_!|A~|KTYkYIf(t%O$qX4lE5G~vm&ifcc-Q@9@ zWnWgd%0U=I#L?&Hzt(+1Z1>c$s1f*4Bw~23&>piUh@)!zTe``AFN8%Lc*|)b_ou)` zqUo*y_8Lw_mG#y&3-sDn%I0xx>0>y`J;+shNBhaSd`7z!8l9%Xl z`KtF`7|I3*(8C4#t9Z>X%zg}XTUR;i(gMQUKR;&>Ea6DSSc;?Bs_uNWg;PXCdC3OX zH^PY`*|n!lq$@Dq(}G6fiQRvI)2-!SoUBul&J)CmGv(W#5TDG7EK6H%4*p}PXX*G= zut`$1=*&~--`IAZUG6PpJ%9`uiRTzjyEvQCcF?}PBrRC0{LzT3!Nv92uc@LVRK197 zpYMjxg%c~56uPP}hELfD>U4H%Bfrhkt~4ny?yUzD#p=bl$9)!tj1~Fu->w}Ue+$vK z-pK7bINe-j4N3dJCNX+9XnlO|*+XaC?zb;+-=Dw#!0rADf!RuAJN`N&F%oeN@GHt& zlkW!yTOQWTj6OO<#j!@?C0Cq zXxHKNiP**C&b$87?gko>uV2nRbTT4v_VqTo(4URapr{hKpEs#H(ns)Z*j z>ULpmc&a1ybujd9iqiX?%sZK(Q1wWA1>kM{(~xl%t-<(*WsLLkRiocKQgBV{y=;sl zUR6DzF&ng-=TELIVA8v1O_EQyPjC1TZKvcu&0H z@nQBAuG<5w>9N`wMEaSCp(oCn?N#~TRd+OVDtV~#$&)bCW@?a;L$&1QX>Ic1vfasF zYW%oQ32BFzCk`O4V9`uDmSnM2hKd>zgI69;pv%DY3MTM>huV^vB<(agdLOyy+Cw`UW*_z4v@u z13g(sH)Ge7GQl=-oEWptz`lQb<3FHdU=6UF<*>hqJ5&z!|E2)?XElBB;=?W0hq6p` z!8Y|$s}vaj+k_VrnSb)l92Id=gmZO{tFXVH`6I*WJA=0NhX&x(Qp)6J<39f2R#}h} zO*!RVgI)e~6KiH9=g-kApE2n$SYt_Z&k@;!qk_A1k8ZUj2;Gp}{Fg1FAqJ}LGjrYr z$U36`NkZug?oQM`OH<$!K|3aF3xyX)J(X*@DvOXm&SF2JD6jRXZm?O{ja#08NL$l1 z(K7SUO!k_k2m~_9i8wGF#AKa$V3G6s9d8ltb`==em&Ts2@^V>>R5Dt=mfKby3YI}* ztZRpE!>rw!%8V$agiEx4M+EMbfc&o@Y*qRH0XO=aSoq1aH`m_%3DbM17@?l?=gGQ! 
zl6iN7e~Fa3$(4ReN;Q7n;YWqk>i(mTtw`=!FzdY-*|&8J*=JD_%+rNyK8CFj9P~P$ zehIiGz8}mUY>1|~lz(&4?#baI-N+E)ZvJgArJC4m4e5SH8^vURFhU|_^4}CQJx?yl z=Y4a=G&NE<&k(>zJHjmXe5pAfXLf(B(lb_!Eb1) z*e&d2#v&$4(|wCNSxmh!4=sGtvd5LG)Z*tI)_sK=;%gV~YA9Iw7R3!=ug=eYuBPOzcgn|Xth+O>6#CyxvT>ARNsaj+Q;V7&F0$XeT31To)r zfSZvv^&_na)L@NGF@B70lm2jlxTy=ogN<~(CRRbaQJ+Kv%x!l{_NX2NI-X=FQw)23 z{L{=SBUY;G75I+ECi?@DdL?Q&MLWk5cWa9HrgbvsG^0(A-T8hN0=fY6x^AJiX2%U9 z#NA5Ax-pj!*F%qw=@z+qY%;LXs*o(#Jf#K%bqptl?8J3b^$zf)3R#X1Ov$%r84;S@tTnlLfBX3 zVB%0uTIHay7 zgD@vJw{wvoMEMV$u9)HCVhLB`%O510-;#E9R%qgWf_V3x z)3kKzd3=$W6iA4FR225UeY6`ejJrjQ)l2(rAbn>f_TK#bzekb$&dvL5l{PMe+`Ajh zNn+h36N)7LxPR6iIZIKA06Je@<4?d<1M&KT2JeS3?)z9((Ev(E=jemw2PJwXJW)rb zWOvIs%g8(|e>YBt7$z_B|D%8-(Y8hvz^ExO066dw7+-+?Sf4f;E2PEVtW4#bpQ28b ze5G>4aTr(ve`ig8TkPVYp9$qRX>_u0_tnK~38!w!EioH9)0)xbykITh`JCv!Si*@M zYXS@fRJ=dbgkJlkD$55K=THsX-Y6c%P7Z<{iXk0Vy?e-2kx2Z#_)JTUFjy<3f@6St zrq-(~uNS7S02{w#x&TXy5jZWT!PxwHmE)MFH0o-P%2yT=q`GtC#nSF>xb1@VOy$4j zr$*xES0nwzWY=WuRfv{BpU~_bbZ+i$&dYvb`Sc=F8_5htw|N`P7>U)hT4nKt(rgLIGD6czP;I9Fy$iG z^d_}3a#W6?GR;-E0!r^FJ#Zc`_rf#8`sl-Y3hdMUuksgJyWeG>ZKx`%y2mAIU z%y0T-W$?@DNLuuB2iQRjt5jTzmR*Yk*@@t?i%r$wwUcm#3;#kov!dXx*v#tLJR}0h z6Z^Z4JC$gMEQ(pn^M!i0)n57mO-_m$nO~xQ_P)*B@+q9$B3&Jl5UMoG$14##xKaW} zw=eu8;$o?m0oUh?0<8xZgp*{A)VJ>Uoh$#6LczN8?ypwOn<9h0zZ*YLlWvJ-uk}_Pb4?FkN+1rzhpy31tZzJ$F6aiu!7e?-RmOryvTz=-=ud&>09>%kHY7dahh{ zt0=rZRFcZDP+)1ak5R2}=kBV@jzC&=*{#`xy0)t++$1}%mBCP%c z8h3H$=+-{orCpejdjXB0x6qFM{+{u@=!^WTqsJtvk7zpf4wW}?$uv!1RC4WjbjEgK z&TNK- z{{5^1wkGPtrQ#WOw;aY~1use}T1P2V+OQI0tMVZYn_AN5PcgrpS>FWiMYOfINWbm@Sgb@=Zm`&E>7_hn|PV-HU@1K zjl&~9dF{saIkqltr4Aw=|6~SzCUe3#qG=AuDFbYdK0S@f_ML))7}e;2{t+r!P>wh)SJ7$b zj$>NoB^n2k{6vyoxyG6mq>1jW&0PciT5yRWC*9p`=(Rs`!}Pf{kw>0Yy=q#ec(@#-62VF3{;3qFD22dUVrr=sPQ(u}Z;lqMo?pI98ncWEM| zqqQmS%Lm3ND2LdMU5hFP7ZxCp?(5j6z_0eJf|P!n0Py^(+|hFvzy32q(*FFI;3}!` zZ^BkQW2vtWMB7^_BfDSp$NTaR7T|sc*7`gnCEF^&a)${N~vmwqA=Mtx#u;DPHMNi7z>-m)JY|v{?oID@jcD}Ys*&C5G zu2C0>EOZmfP2}$A7mf(|f<5_5%-)ci2+w0xGbePsYD%BsEv2oCbAxW*9uHc}%M^uB 
zAd=7W(H#M}t3po;gtpQ18XG2eeDOMW*43Lg?IUv8>_OnujL)ZWj}rd)3oN6!#1dC# z+l`)ZM&)z-is1GeA$Kj4uurX?+G)H>D>Ei1CO-SVsB$?c$T@LNKfiOOTKt6~NepH{ zc`M#J^U1@QE$BhwhE?rqBM(xG3V9H+;wa0mMR;cwHd=38iw(VcBy~PW z=uW4*JBIi;^8R0vmjLM7Vap5t<5g&=wI<6orIOA0qe?Md6p^PmQ*HmS2MM$}T~kxK z`Bi)1nrRNQqE)WbpzbbZOkuE5vRkF9Q-qC(3lGJ$f0groPX3WFvht-TzjmDFC{f|V zJvJ3vm7&`^GBTsF-dfz=vky_is(#&;B0MS4ClOq5Cm0(HU{yf<#PEGo*X;%a3-pZC zvxNx4Y%mD&;JW-NJ>xhlzI&;VjpR%=4T$+#FZz<~_2?3_$a%b;M(W?gZ}|vZJZQ^G z#j6dh6de@F8$+{OY(9MTfhmtG`XjQ`kR)A>Ig=LK-6;Ooa{UG{nvQQew#zB*QYQJDkd^um| z361&sp-vAR(|K*$=+&6 zxQ4p^b84oj=S~?yQde_gVmQ7?qbK9^#RYTs4lMk~^VuUPCJ6QY(!1HDD9aol1~bl4 z=I79_OUplMtu|MC@>cn}H=+t>z0GY@X|kChGOi@nGanFsJ`Hm;eX^Ar64r6W!1~z4 z_Hzq$@YA2Fi^<96lJ^DbNe!Gk90~~>@r(y55nJs=>f|cG^TE!)6NSl~+|1f7L7QPt z@;!{VwDUSv&-U^NHu%Re@@4NXIwU4S_j}8=G`-Z=G(*71r07ah1TwLw&^-r6VapPTfe)O#2TC+I6ut3cPcyD&YydTgMg~)U@0n z?c5MJJM$VvF!4vX9LvP#qGQNzSKA)afN!&I?pU7p@In_oV7QoZO+BDA zHk}=&QPwd4AYSV3@NTexA+liVO~CP5c%mRXG_pAJ3(%_l?OUN;_Y z-vBHcMASQjY2F?)O1kHsRM3Vu=KBXyqs9s3ChL{bWp4r*7;VMtmgDyPq^`%lX@CXJ zFFQT{|GZ=pGWma>{}Gc(ykuG6|M8N?8hUp37XQaf2LE4PvYY#MCKtn+O{K&ud$=HD z%O#cZnWbC&{tG^Bu57}OYu?>A$8>OI?6O{N_DkOzd_jF;88vwiDe&+pAyy2S*Syo zhM~n&55lNkLwI9ezP-}8zVJRH2Ub9_2W+4(E?J1yl64gWd z3~m8dkWV156988|@3MIJA8?Tij}_Q{=CPMoK$}yjTlCHWE9Iaf*`F@}Ey!|nD#_XX zOq9WH(FV@i;PnR@`e+4lYu5gkEOAozoU{^h2)vg61|$8cJ;=NwZ!Z4(G=1~M{61oG zTW}wRikFGFh$;(=3Nf^2->zTX*;{&A;60edSWQvgkgrTRxB`$HX?-l+xO&(F!2-~H zKDywc#T=&;>U#+YMKP*h`@hV9>eNxpx^%LS&k47}x{)^St0F5%n~mlYNZ3q?P9tkZ zzemQ6wtPFWV|`5KUTk%rD={Tk#FRx($P=i(6s!wVsVTDMgMmJLDO6)&)GbsF$pl-D z>ssHRrifQ53!!?$;+jSu?yFlGs>cEeDvtcQo+-MsU`5|c7> z>yOD*8dg!cd?~5=!|FMA4FL*c$r&Y(Si&?w&Z8p`2<0o7EBr{u$g$h;^C$!xpH2X0 zd7}3OcFLd=Fa&=w^DF;}o%}#8;RT6Tk&#xsBF41Pab>6I_((q-Gev?Wjx;k_=VO)n ztB~v7gLFG#tJCqQ3Tk)10ocb%FE3XeDo~mpnExu#MmPgKoMyvzr0|StVcv3<%7I*w zb1h=!!mSC*@RQ-6|Ap|0f3Ik9(Pxosy|sOxVzUA~n2TKF`1LC?`sc=to|p9rGU}qO z_@J58OtmmQ=)`SszmA;6Q0| ze`Isz^3tTH&K+#-j+7O?~vVEV&QPH_Z=PyW+qJYzK=`xEQYd1 z^`ZG#0IYhfu 
zkUR>Co8sM8ToiIie;^5#1j8T>M4OYnGKS=Z@afgVheWkRN|3ecfY#;JJzc;cZy%XzFLuaI+E8!2#lX@KChu7W5P+qykc=p z%BjFV153g=Gpf`eaFfXTpIL-kXW8yide6wwon^TVS%Ps{3gZ2+&r(LW6xc=iiVb}P z$Kf8U>-qw)GKD*GI7z&u+zM*3@1T74J&UOW6dh<#dXxPwjomX7pzxu@dtAk1&!i}9 zYf6j;ZsA+5B~@!uCTxiWf|XiUrHS5n`s(yem;(z_4xGdw7RJ2Lp+M>uvSgGvPVepU zsxrW|SQ64BFE2P+0J|){`Y#3%ZtGE$=NO|O)w`8k>|oA22=#{b0VTFxW`JL-zQ?^L z2xru0E%Ixier4t&&r1IkKB0ImmnIm$f0vU0ucUTu7bhg}8&V63Qx$!JHam+wo@t_T z)Sn5*gq>F0yeV3-=lldt)m@Rsg#mG!6!Y@7eL_Kyqdn5dkYPSi?$PD}CK`RpPqBbA zd5{ZKK6SXFRvC*+xfN~vA21lIXWF1G*sGG|=~9O5zI~@5pGclimP>z+3>s!cIcleF zpZS(frP1q+ebmKBZ*@lFnS zk6+UhdHvz*c_$mFFd4G;Hg&o3c)$&9y`)9LDCcj!u6*eeUYIxY=|3PsqF{gH^_T`G zoIhwX0WqAWqA1%9&#e$O>#mAFqqD!p?{1B{L`6#7%dd+bg4i_Xf(RuB&ILl{V%M@nMpQolhAN3G|t zqZ|NeyQowwj+ck@zha_Qx;uWIGB?W(C9uv#IY_+G`%eE;Po}M=$k9BY)(&53LD9 zZ3zuq`{nJIql{$4&w{gA?(S6Ir7FhrdJv>NBev=dr`4lmdf3<^jam=d<#J20W2R4na0amV<=A&fwjvpe!SQ5ZKJTsgeaw)w58MU!hAJoh$`0lL22mISdlVu;n2J zO?01XBB?s!o(d%X`&;_kGWo_CiprOYiVS#8$k~ypB4-3AX1$2Cue1pnoYFD4#J<=4 z@%$%eWR9VcbVfYFYS>r)_4t|d2Vc}$abywj2Kx#vuE%d^Ia9@yxCle#?c9-1V0_qoK;X$JX>|8kg&D? 
zBLo-TkY+0>!gdr$V66>+9+(X1mATMX_9&t?F?ONXL!P~M`Q||r^KSW~Q@kfVWbn32 zdW2ssqUc*vS~g`5c9cC4T#Aqbw8&XnD*)vXn^fsZ8HQnA9aT{5b+TKWoU!3l@byUS%xfcduuzUE>Mv61{p{|P zOAgx=TxOyCOzZF-BQF)%S>$v1W%Us>WPZWQuxs-oPHOS*xY$p%={el!d|;JsgXe6jck@#nUM-kRNc&`6V=wtLXFe(a#%o-XQ+as^C#Xt>(2Rty2r;JMFkI2SYzxm(O85yClnr# zO^J%Kn=T|E>VU^&t%U}?*(%L7E&nDzny~glyHE-*HWswl;c8e*)G$Sep=_xjCjngH zUdhzhCiao>?0qHWg+>TYhdNEf$2CHAfr?6MEXM2E*w|5di@cUMU>g7Zk9ue2LH$jp zTRKE&H_t6ulx1XpcNZmj^Y{`_{GyDD8{ypLeOom`v#5iS_(S#1^5m6Y+ z)~LAFuJ}E8DUi8BWJ)K$R;_QBJt+)6LE+A^5I#e#{p*kB7(NP>^|jxQ#j{5w(IO+b zzJv-8pwwlr&*)U(l6K#d+rXg@_TBZx4G_yeKe~D>86EvV(#KNQ#$0_R&-SfaTqw*9 z$&DYA>gP&|tVlvCFwkT`s6imvHy%|9DHE0}_jE?97OF&7dv$1;Lt``%h{J|}4WFGQ zx-^EF4?kUvhTA?9z2gz1u1V}|S=h>b3eGD~CaT|MAo4u#R{47`)>-Xn?M_$PeuLDKp=Uj%xnzjo1f zc70fG7TZGKal6SiL`y(GMq%2}@Ni|fX?HMNppg%Y8h9%M4~I>&QHM`E?W-y)K#Ou> zDx8zBW3ZTH-`tPGr?#&p+3#ck^p$=lf6XDX_hN_;)O}KNqJo^M{xgp@+kZf7=r~^P z4nAqsxDwnh0Nmiy8+?~)hs?W`kZBNDD6^$1qWCJUsrW{zvQFW`N8D;Hva49GaG&CF z;7!N~y2r>c$hfMKt18y;fP&>$jbDLy1&V2G(+eIMmzHILX@PB?DFv?fq!%S6aDb}aE(e6+Y=A0N`9Zferbd?2NH@^Cq-s;SgETtA8Vd_~u+ zcdo>%r|zN$be!#l+yM5tCj|? z8$4#$t+TnGy^tazf^1?Pc&}$OOVQ@Ul8LOtH^x?W?MlasWqhTghv_PE))!Tjksh$8 z6Kx&&AD|0jYDb`Z3>4Y=0vaG7@uU>lx(ao^ciU?sFVbdF6&67?Py*R4@E|U)Ak8%D zCL+_6#foV^jc8YB2)@Hdk+n!v8Ke?xPQ*k-6`=N*LaAq?`sJNCvOiK|S=I(4tNSwK ztK577t$^x^xV7M}ZF=}ZHY7A}Qf4D?0ULy56n!}!r^@JIpeFaqSXp+TdCPJWgen}8 z_UZ205jPwl>yn(c#5yr(z*o+1Sxm1vg(gRIl1Sg#6nC8qe~t(rV_!l8ZYY~bg! 
zg9g78SK?;*{lqX4Luo55q?|0PflUpBa0~U6i<=_e+>pW*R!emc;{Fb-_`!2d*53M^ z0&CiCdtq=-qmeI=<_ck}ga6Dx+>AwvT$15kn<-fHK@V+!$c)u!Oud^g81&fdE1H>w zu{ZkKnPU1#d}8#!lgD*B+zz%piXg*CXTINPeR9C4$;cxY%BS8C@X{Br-|7j2je7pb z679|juOOHP`aM!p2k~Ok5Vxas8J1a4kb!jD^)n2;NtQSaCv{WrI@l z8zf7yF(eNd2%@>ZU(3g*8=jJtejy!yo$`cJJQtbTod*-V%L#;YI1yfykJYq$CoKfj}hasG$&1?V?=VATC*0X1#)AA7)Nd1V{czEiq=^`66E>pZ|b!+)KvQBezv}W)NI~o z;GS6_zLgj_#N+~^7@?itM-NKk{Vd%2xOaxyVb1}W&pnrzz?=RLRyP@CD#N{Q13s4ND`iLeuRn!~ zU?;O_agOK&W4z(AfQTsivlbP~0qJJsR-5cK9$4w5OJR~=Mc%ADL3H}#^Lsam5LJu@ zx}wEy7mavf`lgH`?Yra}5(D50G6?6?=$@BGBBh=*iA<31MMJjI1tNrJ z2}N$ZnXM#e{?y^^?gZQ?R%7Wxy>Iw3VyvmsLc}oXo5+f~v>sVqvet{yr5tYMd1Obk zOht?wTp+a3V<670ur@fTu$TPz2iHFjy1}$lIf7hoo=$;3Bbc4og_v}o3Vc*Uh%JfNLef8wQwrb2;jFROix?&nq-`{Kl(le&8O zo|H}gs{MCt<_Ej1&c=Qk&j1A^4^B>XufvgB%)rW6g6#ls4Sgy6>oK-A9-Etq-_WNyGjaf#0ZdsN#ioSuhaKgJ;ot{yMRv!_#uzxP*Szm4n%p z3c_wE9L{e*12HN}-9IX+zhV!;m-AwQ)KIc9xZXjfsm08WCwWXB~jMMiV)H zXkz^%tdA(60{WQbJSM&Sxbrs1$Q0cOmT6;VcaOA*sfiVe33(`(TwYt!Fp1MZ>G_Wb z6XZ5(gjC4vUMO?cKVy$YAi7onuMYv7<}h0xvdk~kWg1rFMqD8BWHut`BY}bT`F!L9 zAu>RFfrks9qyWfxIk{Q7H8 z#Tzmzdgfa8iZTsi{)?gr>Itgm)z0p|K_(RW9sH~Y7GsU3jh-*88)aA}j#;A97QR&( z@KJt({r3M0BLmSw7-XNPmNpYiL5Ae6mYjOeb6x8cnlWjzU}nIQ+;PZgj6C{Vj(!A; zEyK?J`4D6s+gd?4ik!CUxA>MTPz1o4Q4ny9$Zd%QL7!~hmq!cEXgW#FZNG4}GQjC$ zK<(9v`>P#%5F~Js;48vPL!{LE-g>f-=0U6m`}II=QS6$d$dx0VfpDuX9lgZD;%@)e z?79{gL?ts(OS`0=fTN;oUZUW`SzC$`UUZp0wk??5^N+ovgwMD?ROj>bB-v!$O~PEs zWs5`=`t$>|J~m!kF!C0uo}uf_u+-go(JfF2j`4Q=S|Jy50ESqj!CsP$pS6~}6xypY zBXa+AaqnVcZ7iz<&YaH7l%D)Q?9p#r?-29nr|XvdO0wx=>PC^o>58u{~NzS zK*R-#instrh?+AqGX-3@!L3=2%+#{9w5-ek7vi3&m7=1#M_JkUDy}jcjvQsf(NbGl zT4rj0&hNqhegY5Ra1Q6@!+TuU>k8E-mPsl`Jiq>B=gupS{{iY*8K<+BE6JS<<6r^P z<`@D4k*{Q;pGYXgUQ0_?l_~eO1w<-6uo7*sNdmVPCDI>;d#GgWj&5dm1nzr88TFLY z+NriMP>{Hk$Q)(gY`*FrF74A;zHyC2zEVgW8~9z&GKbArm`Ho1}^pl@+&V=5`qlT_6Qr+ zSkNj61>`S5H4q@*40I^89Y}VYP~wY3oOeI!f~!l*hQUo97iI>LiVf~7>!njOv&2;7 z+xa{Ozym`DVbwG;S@^V=*wP!3P<^@K;Ahcuhm_=Qc(zQAAtpKgpt*AG>{p5yx9Lf% 
z?ANQ>CN(!1q#DO_)@wc9d0g=AD}tN5${7q>pXj4E-#tImb(#~z_NNnX$|`yEoW;N` zn^9kE-^TuyZWt@P)d|zA?QJM9vxAvah~WdtKN3FxveA8f3Hf#l-I5f9E8LYT`{`3E zp^Qu-R!&5HoQvtA+xPM%(C@GmFixZ6OET8CGbGZIMIrQq?QDgdv#H*<%vL+piRlxI zPbSlb%ycOmx|g|sKmLFBJKmGt?t0oe@9PT_+HFY=li@4R+oujy_ zG#a6fEoL_HygphGP$Z%$mLFE^GIXhDfLG--CR0|@y<0+kKDe+%v z$}92rBW%-MjvDCxm!UhBgSpH)2GSZx*V0K2Im5LzOydYRh+4H50pfE*JfyN*c-yYl z6Oa{I&ES?ATxwzNmmO@R?`s-V(4aXG?XWJnB=}GJI#guptO(U#Nir73cYLf-}9`W}GE*}97%u;RYNn(X>wepP`is=6so_JdQ`88M_$u5Z02 zjbe8DLEFH{il2lGi^+$MQO!WGtCZ8ezlS)g0w@2y=iYyduYM0Q7z(|9ZR;$V?=CVH z;8rNTXUl86dX=I0(#e`uQQwZ7vr+zV3;*`w?SRmFj^U-rY1NkifGOOE5ovMa~` z*vKs0mgx!Fic3prRg~ZD;;u|N_2j_-efab7mn>pJ!ZRxb@eGdWcg7>9i5}S$J}879 z_bbTvNJ>`mn#0&FTR-Xs$2?V{GvzQd!j9RD5}jA4$h#ss6`tD(kjY|NCWI?# zvEk7jRBTXV0y$6x{KBnl=ct0en4>z>2gwYN1mLE#4T z#9z%F=kj<}SWNmr1$yuEkB*YCpISy9_v4)?)DvV+4$u?2SDhsEE$dCL!Q{VuO5Kp0 z*Q;1ltC$%!oE{DKjc!Qqf4{xa69%B#4hsQ1cfi+Eulf?e5)m?^UXK?|v=Fxov4nWM z;XCU((#TeKBbl_ZbB2G9bZ6 zIpo`EjR^+~Mbj%%zRs0DA@5>A-nJh|>{7*1;|uY3EYq@#)}1xvS05Te{YluzGuQf` zIL7ThdnxVj?Cly%a{bKm`R#-YHiWw1wZ330DGOTh$p@8M!PSdCa6;U9j?SL&VS&ZV z6k|C~jy3bd2e)s0>o;6U^CHFmBfV72F4Xw8>DOfj&r=vujJ&;gIH&ylnIa1ru(Cg; z;oKjeeTRas6c-Z3)zIqFhe%=DF7NJ_U+FMK=zRj8I`cP&kAA+6RMxUJ`1JQPu^HL; zcc+d|WZkJrm1V9w(1*QQmKCTuln6*3GHAJa{mry`n$UHyy`Q4SF-;|l+L02Qc@6RQ z4VFDTLr2?Y{YhQZFVnq-k}J{Tv=1f0OVj}U_y2&m=59x=&%MTtOI%yBfcVQXO%9m+ z@Ls=r3Xbv|Q(I5ePbl_*2f@i;$Kxlym&~SjuP5vo;~V3djFzVY-NpVVX6J_AvVM_6 zyty?~$#yb9Z`^C>JniL#D+M^EjHJkR?Uj!lWDi{FWROfD3jFtw$sz15h<|U%(JlQ; z_9-4);=^MzYk%j13c;ro8nsAXIR;r|#a#qS|36^t;0B^$4lDOI_RF@_L8!LEwGmKs z|I1G#vOh*U6XxWy^RNrqMt*<0dHROT!Ci=m5>qRw3HOSk=DqzrUnV!hCLh6$!3`|S1rrL~wz1B#KT)P) zAiTYe*fPEP$I`ll&njZ>?0rS8D22>?{~DzHE{p|s{pR_!tdi#H^D`PljaXcM_f&g2raBZywxjo(W--a1~8km%nsWDY4uV)%&}xiUmH%B84@H z$%nH2#fpR0&IC0fYU?4M2?Ki-HM$O7u?QHayXc`}QBowIt1-rbxyj@R(aX;twu4CE z)efYuYLQEg>hMY-$o4u|j3vHV?YuXsH*-MnTlCY)SK*=8 zRdEMIgnU+fP+R_LzYQWUD#+4KA=OGX%8bxXh**hT)BJ;$cp)WR7WHMq;4hZ8zkzIs zgKj68EE%Bbvh=LXrpRk_C#cfs#kSVNM1pvTElxNcxXDK+M4g*k#b2JZVQAA02s`@| 
z?dD}Ju3TN#cp46JaQ5@evHp-k2a8r6Jhf?h(M{cGut*pPat0Wn`3cjy=yGZZqfgJx zUdXGrkuEOp@=|*Vv1PB)-S{7`kFM#65$dEs#<%gBA>hYo&+8{#YZe{c>vO9#G=^G^ z%06&jN*5cqwUdNs9#fle&O#7XIlMamBqVg8VRfY9F0-iymP;?xEqre&6^ihZQLnJ| z({UN%?jUFVasKqA^kXF=c}<19apH3bL@st5DYCkO&+;FO$89g&`}xUjI}x@Tm;2y# zP4)1#{vE!&03E~)FW2?s-*si&LB4dk10duJ1tuGzF#j<~)mw}0Vmgbv%jAL(N%*^W z&`8}^Id?+KHsckaUL4rlC4&pWXoWAKRntc2CpgjQ`elsIKj2&k`#DLvpJveU^v`=| zMo8VaYK6b|YQJYtIt8@mpl%3UpA&1ZjPzpH)o1`>p*RRSb!F*Cpd@$D1lOUl5x8{D zPOu=qI4}fT=}1c&bQ1Djda?E7=U>XE{p~7A`#kmapUQBP4@hqpD7oqv4&Io7z(gF& zdbWPi5uQ2+Vr)$+@$)vxN>~%yalD5g4mts~1yQsRXc_5Cf{8ywLsDoYe#$pbLO_Mk zY1x`9frsfS2X3>7vIk^TPYV%ZOc?L*f*!Jx3QAPZvb?<<&fXC%|^65iWFII4A-a z**(D8Ans1f@(PzxXUVpSk7>Uf?4w5%x`^kz523H#)w}uI6fTVZa(#dk9GB0tFOYd%=R)4vmRsgUT?_<+*3M?lFx~AbI_eFHXk6X3(lCvh zQC--Z)Zq1w@Vx69B{p*8b1{GMdnEQE0U`=?&`&M9J8m&5$JH=WyF@jUbp?L+KdXdf zeWVuTF41oKZE)pn77oe0j*Jsue&{FiMLfLdHh!P|7hgm^EY6+&$LOhAX=385zg9o{ zs($xHgwzZ&!1~-avV=+BKcI4TMnOg#e2ci%o#l?VQS#`3luM0Nm2{rP#9XZdAwXp2 z#E)Tb5RV@b<(_O4dgmyuIxpIU<@yik&x%w*SwM8$IU}i8)TiGE%Sg2Po?i^lc6Mk+ zVkp-c#*{|P1sv$Aq;wtg@?p#uCxsL>6dQvez+d~>jl!{Fr3a7Vr8ppn?Bj1@bCHcD zoPR*K#ia;&cr}_!D78K{kUhA}=~K}(jS!`zI7QT^qGiDcEF9Q4uUVAQY?1eQISwR? 
z8DY6^EBNmRt6E4NzMpv9_vBf-zNTXa{I(0>cXh5{B`Effn{koQVZZ80Sl1)_Drm$+ ztK#)F$g*)OGd(fml+TXBnw;usakV_zww>iApaBfgiAhB_$T*WsMm=pgLg~1je}*+P z-hjuhirOREFKIXzEJ>+%@utWuMgdTWS_weGLC!#c3@PS-=f;g;qOUfQ zaN{tW~WoLLib1S0104-nBOypl|2RCX>L{{b4tt(&(;`^VYv44KrKZFv@i&TRtuU;rFo!n1v__Grr1+Oya(poT|<}vO;n@(92MIxIoTb z&iyzG*8l6w`RINfgN8f zoc>6p?e33*FsU(^_0B*g?NYcHYR&cf@TEboTiPUMQ$T)0UW_$Tf^6?dC~~j3KpOBV z(ls`|xAFMv4^Jaq2s45vV9qN?{Ix|tTrjZ`T%Pq8J&LV6X-J+nU%oF_BnILEn|^M( zdkpHXf({||itGhcUZi$GX{kgUc0_mBp8zM(+U^N=pn{#(5BDW)UtTodLjrk;iOw3q z&!t?8=gSbCqtVDbBE<2Dk%kHTOPomWNobmf^c_%b#rJk1DVHX@*=LlRyW9c*g><1A zY5#iS+1O04fmt}C*$qz#R=k_2Aadl%Rsg`Q=VP0LT<;q$_roxwb!PVi>yPH#-V>vK z*Eb#Cv+ePO^RluQRKtY;M3Jw@-zLz~%>)XcF1LK-+_Aau;)swML3qI)1pCWNv0t?0 zBgYMpKGzB8hb{Ia!{5Xvk&Mc}yo zm;dQ@Ge@7#^k(FOzh2?ymz3|iPXlIbDSCvN)MPbp?a6e&J&TxR4{rZ@`Bxgmo4;`7 zN2z4cXy6_?@va3PHsD3%q-c4t)i>$E{s@pWp8Nz?82>8yzoJ*i9O5tNU!_q!L541P zbJ+!ijB)r-fPty@ox2-BiXV^$^1hTz2Mg9;2_>iB3&e-LjiocMd(sLe`5Mb-oT@>G zAYa&5ct?JVPMY4%5ScH+;Lcnp`VGjETh)5j9Ppem zaGYRkrh5n9XJD{>Sx{%)ng`j=6lw+5m~F)cQpsPdCZ|5fil4o36e{HoKsQxyCQx;x z0BiMVC2EU2x+Y3Md^;`ql+RrMCRxycu_7dvZW|VA6}j%4m?s+T1mCfRo9=fcJpPx;(_69bdk zaM6OxCbh>xA~Ug`-wU(|+7K)pQRm`VScUd{n3Y;Fytz6tSBA?=r^RV}uNovrm(fdX zt8K642LYIfo^RPY-7`jT#qqZE?OR)czpxUnk<1so{N^VLk76BgQxM9yjhVE%;!@ph z?=AhcTjfV@jcq3=Op0t?q+AM}xZbo<0@7q$#BJM9xE&&!E{s@HkP}ztX$$sc@lF%c z_W~#qVtwL9%KWJ1vPqKN=AGOVoFd~@pAEg&%dOTm@-a*mt3li?^_&(yc-$;?v-`WXS39pV#E#5A0q4%Hy>phX+9ml`PMH4{Az5 zu@W<5eI){Jg~$5qxk?Oo`MmJo2DSVJ>88NG5VsMkO%`G*&jo0B(t{*x>kTA|4tsGk zKwaW)w%PiV8(l3C(6GN>5BJ50`3lrQ$^VbB;WE>l@XE z2RVX0=fv(EEg-r@NPxr(b1cd>+0`D-Z#Gm6@Vcn=7ziU}aKR{z_q_p4JJb@p0K5a?yL*Ym`S4~52OYv&W zLjHjXl-=9(suZ_CQp#y*=#6)SAMbpSEc=!j!%*)Y1!veLn>=YGF+az`00Nxu+xPzd zi-8>#dPc_*Wp261m(_OY9wxiq1#q?x7||oSBK9ldY%u`7JxSgzL9mSzw{4Vv(=-Ua zCS^mLsfm(E!&)`3Yu1k3TiRPMVaQ4u$w(}!Sq7^Rh((pQEXLsYuXgSQd#bpElKA?W z$5p8S5RalmxXyi#tJ09%C_A&H~@mx?4XRUPOuPxW1 z^5&w5JB6&B_!b=eCHH>N?=`4866y6u3vttFNITvL)?lBcL5?V-Os>6?p%{)sksg2cW^Qyl56N*-pKbwM_N!}#y 
zI$7QFf-F1tM{?!+r~pHJ=bykyFsft@A&^D*-YBci0ZlQcx;E?$DIoeX*4&Jr!t)-q z>jG-`O6s~Lt+TV^XW@#D@JekPPH>mq+YT?Hc7s2t>YlwGM5&~qIb`x3@3x45@*d}6 z2Wyf>3pL951pN@?hWdP>B)sFUeMDiA<^YK=HJ5t_u?e(WPcxhi9`tLOY<6pZ_*f*- za5YvcSy)zv92~e0sq8f}SVm^qlr$2x0PPTF-24z1F_;X7)CWW?CL762+y$%00vSjf zFZ2aR5V_4rbmzJ69|mUYl1~`8Hh@18foU>?xo+Yy>u#oP^O_K&Y^d~2TQmt^gdbyh z;ToC6A*_vT8!i03V5$ps74MCzc{Ih!Fh`gw)lCt10+~eGLW^=WDt|@=p$*~|F2GW% zf1FKm?zCl#$760(4BV0{yqV-t#D(xi0;Sixl<`IEi*>SO%3_p!d9 zCT5~%UqL@Ix`>2eQ$1F<>I-vdg8-#|`GkBOL{use29_DUfyK0ZjwvY$N{6 z=!p_-P14=jqJIL1HRMDkmVh=K2e%SIHn2;w;V9CWt6+?hAy_Bo`R~)i^kA78if+x#g&$D&6$KjDS&BqL`29Fn0hiic=3zV z=95dm9}XPX)X;{^+)}fWQxD+!xbB`)c|WXg`Eg59X-{&|r~UJ_vRUNxqXt&a!daRQ z6o?V2Hh0UCX8wkU&C`>EQYWkB&RYHhbVW-f?GSQR^5VI2684w@a}oH61V_i}1juIW z>iy-vRTS;lv;b`1_Y(CdJnD_72%Y!~aoJFcS{kE4CesY{j2z?;`x!lGbm8EG_ zWPa<@FaeJo)U^>i3 zzX|E(^Gl!i>BPvu%2lK5Nc@-2zpq&ahCDyXoNZ5X-D@=Z^L+4scoSwx)295)BFt)Bm>wYLZU0*!$gS4+2Rz?dA} zJ)mdx?)LyizVwRx9=REzz>@moKWGP@#2B%ve&+3|>8@ljvVh8ipPh)Z$%w#P%@o=% zTQloBIR_NAS!(qn7i&EuAi;dwcox9#locEjh8C1(g1z;C-2ALSrt4YuB+|yJyK~RQ zuU;39!Ws)atyC6o7#};CX>ti6x^VK{R+j}E&kLHSjJg$FFxRW!xNJ2gH2Qv!jH4m;Ym8BS&_{KREmtr%#fON(!St7wC=9q@8 zElqI9cPYSrFiGbZ64e$#{|B`09vK`9e-9ApA#}_E?ZY%?v83~dbDu%HJQq`+ zow`Iglds?%aw|T}0f46nP)oVhcr*IZ*55Yv77!jlfIcM-v%SAHLJ$n zZTRy=S&C|;R`)D4|4&4Rrz1FA535;N>^ZRBWR7$cL+^^sWae$YP#d|Cn6#j}yW#Hc zSov;)PM>v5$s@VYW8*!M_59VmQ8#y=b7@y??Ebv6L7CQhiX2Ljt-b(>K=R<-`L&Ix zf)V9P37&^Hdv??D5FY_+V8#Yj<$CU@f1D-LQDGk`q@QVF7I0&|2W^_}Hw}wCrpOl| z`@^Z9A-_txw8PtD7KKnSGX1a%=$8o20@F6mVMv% z#QBnfzq@`Z;KeQ6P6yxYn;%xR)_^_`)YyoPQN-C#fSa6vzv!)s1fDOhE?ZPy4l$r+ zK+4w&>LK;iRgr{ZWH53MPfHGM@C2%(UgOdD72`n&2ui^cWpOE)?e zevtRuinY>j_O6|1Jtrp1DB+EBu;zsITD?#}d6BQn(KwdRIUq7eW+B~NlJE!HFo$98 z3)iNqmygY$lq(;O?H=rNC+9ci3Db)Yn*8{pr?9B+DZ(L^FMK)pjWk}Vvuu+8tl=qV zWEv|5gdGwfZ`3z@t)ojFcO|ub*7Rsd4)+|@S6#eQN~g9Bi3GkYKR`1XgsAo7j^094 zVR(DElM13p(k^Z5BHjDCc5 z>*BxRc3!r$s@{M^_6al&ADg+JnAZZ_n=~4^JUb{Z+mut2t`rRkS#2($w%2JU|00MO 
zBT=C;avjoe)zblOyuM}uV9t018fi?3;wPCTSClz>DybTg4F;@`+)=_Fx0P@d7Dm3c$1WdrixDk67p<@J3`R zJG)ZBVeY=UinYidDZ~55gVyGX3N~M{&kfy&k>;8m(AKr!JcR!!jnww$Yh01%)=TSw zy>>`{9i5W##M}gAUMkSCSmy10V!1#t(82v~MRDu)cwrW6GRo`%KV~nl>q;E5!a-pE{KZ1+Km5*uT4;=ni3f zL1{hXW+6NtzEFdyyf=J($ulfYArLl80w_VukSwV=I6T~yH>Sjd(9c338=B0sMY@pr zyQATBs`qBJUfndEEJ|N6KrYVhIbHM79{z+N`LC% z_i{`q+(b7CM7X8b1e1g=e2LgWWGOhrfTIoP`Iz8iy&&m*6o1Ih$EP5=l>y0$TBMR~ z_SA}g<$D7;Gf}v4wl$%9{Ga0eXDVs52(J1;@9?h759AUE^qgv*w`9b@qmG9Fgi^V! zve*g6l`5$1WW&Q}u$}OZY55WJlf$x)xP=*TxW2%hbM|v`D6a;%ct;Eq(k22YB*_KU zhe6Rhs=h--b}u{|ax3{rR}OJWmCqbJu`+WSB|R2url!<915fcP38rWNVy&ZFd*z`C z2CsMfa()X=0{-y2`dVwnT}o|OK_hj}F+n=pbKo2)&pl$lWmg&q6-+0(NA@xbj5aRW zk($xC$j-VPKKL*LbSqs33fw}rq?p#2(a{Ux1@+~}ME(Q!^A4>s_ZtREpB!f)6}#zL zk$;SXL}dn1;CCi>Dp)e_Lu=UF^ci6kSU8%@YlijCPUCeP;0PAP*LuyTCXW2VT{qt+ zV_ES}IgEy;1Uvj%M%0^sUB`HtwHk2}yCE~ALc^%>qwTJ&$@Q(1rV_}cB{e1m=Ry!H zVFie>3qSto?(Y|1`k+mU6l^?t0D|&@oVFyF{qRH<3CC{9M*eo27Ef0q<)DB-zPsbk zheykPFB~`BMUY0B(kXdyd2sqpQQHz<0PMhFw&8BxE*>iB--gM<0iCf!5@fQ;$gQ%p zTs!XN6MgGa;}U8xtq(=VssL7rAx-n?2*FFKFvx^`I9n^#4XaD8>OnsZ_k@OFo(>*e z?JCizB=^+A6}9$+`~%L{%Z!f*-B+)(dPKb>QCwYyF5cnT;J!;|RVxfH1|ny58Bom* z>o~vnkryDrQg6Nu|DB0^h>*{JEMNS^$#5ljmabi6nV;@TusjT}Qo)!wYX*&Vp$!wL zb@0%f`sd??BX&lyyZ0x`kez)!MS+&m6lkbLO?^^TvYd89z%8H0uV{Q{_mvvrumqnC z8fkLBa@%IIOIPa$u~K|PMY1MA=txHGho`e`xUhXj^8cam(%$HR9h4$SfpY-bJRINJ z7x}}D@+gaNkQ|HCP)|+B#}P@xIIhUYLh0^|hd1ayCi@bZI6m@3{DX4e1ZDcT#w}zO zUTju;Hb&d=WMr2|E*BH@Btyo`UFT-PP4DzO)j=((ICf^dG37T!{uPB-?@>tl^JkRzL3zKhcyovSI0U-me%gfUr%-xwrefxgn63(E9 zz29j*5z14LycJsZZp1j)sW75o8e@a>9+wd#xnT_1{*`qc2~cA;;f{&N4l|Q*4kEoA z$4o+!Me6CI6QV;Kv9r-~M{$*2+V3M+@`Kj!Em=UIit=M)!2((=^yu?&9*2t94d!+! 
zyS*22v?=1}7VcEd3)z`2nR^r=4lx-_k8Fhv0VRD&?374Z7cx1F&(H8ZHvnK=idt44jlU zP@UzqNf+m%O3+iMM26BB|Ba^RxRuWNlv=cs9#X(rUyhtsB z$bN50bV}L$UUJtsF$QO#+BC?dPMj=N(zDeAH&k8Fjh`!jx8*`9_pm?|*LEGzU zy_B0EKA`a4-{0luXa>l7*cE=Upuz)v5#%|MGdIoR=|Bc)s+v2<&RbJt5iKodvaC%m z=wUkiSgy}6Ry31*5A8q|{)dQcv_AIEi0sFq+_r!t{_7R#NIBX?^i7k7hRfv9qbCum z$vIvVqat~mVWQhYb!RJ?_~wvwx6}5HrsJn??5s42^VG4Ooz7_Q6%Zl%>61XW2Nh#N zU-)?9*9BXmn#zNOu!~D)F<$gR5YWk8p`-g>L9cUnYw!T3%HP?Ps{P?3;>z^+sy(q6TIl{4y(sO5LjYhAD^^NKd!;_A9Y*4210IE%6wW3lT)ZxEcTWi_Z}*v>xhQ)Iuz|N@TTaO#8V$SowvI- z?B2?cqQga~MAtxh73Z+-R7h72CXBDY``Hs@5NqciUl-{|(5vt#q`M|Wg$D(CYD%3_p40E_nh6cchmH1B!it~-2F z%QM^TkI68U2UQ+nv!zJxk?RJc%UWMYj9RE?xhL3GpOyXxBu#s_sR$qYw-sM?y{wA8 zd~L-XnmVNxQ8Po8lkHw3vR3M81nw6%61{yTG=uyXt8P0Y-?s4#rImg4%>$QB5qaWA zPPx1u)~G5#Ww(Y=!nOI_=@TinFPaD){CLq%`HPURJn&esQ3Sd2zj~SpORVgj-Bi>1 zqg>G69=DaB6FV_hA{VD0Zq>Ru4f#PCOAQ)H_MPRrQqmW1AbL~_Ay+Ph?p*|NXAxAu z8Jv(P>3e`s2f-diH&z3A~Mh4dJ=%CLuZ9>sex?(7%Ui0fUrWMlI& zR-$PW4%J@i)i_>^<808}clL_ljwNsc+(6@&yCh@=?3Kw)s#&XhA+mj`lM6~uZuG$& zd;sA5M&xIU%8BA9ND^agZYlm|x)J&|$(|8;;gSCB&tNX1Z4cg2Ohdpcsfn=X4(asbV6aViR_B`DdE zYty8>T@uK2o7!6b<$WNX%nQ%w+c>X8g0b>6wwyOpWVXXQIS92@C-KgTeHnQ!ef!24 zpm8IN^=g@nW-|Ei)eYvSq;Hv-)VB6NiV2?ojM~nxc6`%->II3mrF+Yy@mdY3@ls%u zG|_u0COeSrqF@=AXaO>^i|d#mc?|V;t{-YZF3wFvmcH#@X8WTjq@!^@zYE}tGV)*) zoqGMq*!fG*@yZgF99$!%-$G-6@kI2{X#Z~d62noZObfqxFz{mBa#U7!g7Eb_db33v z4M>y?Bj}D>=m1Z)+H@pFy`7u}d)zwje zkrH-^Ro38gpmkB|@NE6NG71}3&wx-Echj#*;6VD8Ag%qHilO58w#c8y;>QJGUJnvH;nl^BWFSH83fW678k zN0DoIlPvD^dpLJN(rqu);T5OJ)00AB@ws}lGLy;X{{X1cT>4FAd5#Ib9}L!Szl3`r z1q6cO(`^Z0uMr%XYz*PCY0s|J3tmWK&r>fbOf>um0P}(cV;4;0xdZcQz@Xy}bFd962 z&YA0y(&=a*hF@8eeSIwvu@mY)VPo=ioL(4RN=q>_sJ*F9j-9moXz(W?q@w@_#>n+T zY>0SgU5z0ruJoY5aPsHrp(kDv{AG@Km{K&PmTcUH<$QXZG#@IH?0;!> zQ?)$X(q-Wo4Xifr${Z6XqD#_qVfa3~sf%GdBEcQ#7atBX&wj8qeaSo5V5nxQE(7-F z$59nQMa#X4Y57hz4t+%YtG0?0ZQ3!1UtdgC(qI}&J{NJvJxCft4+S%8yr(m#MIP9a z0nqG-85)%Bd!^E=wVoFpI_|W}?_}5lHB2LkD%{2_W2(Es=wi*Y?-!zu-*hFRR09IH 
z;sa$g2Jc?E6c>*suesw_U2Aia(irGyY2H?(P-f|qo(T%rgnH(;F>C2h)d-%$%moEQvnajl~*Pq_$68nV>x$#+Y8=B7FEA#~r#H_}}(3^^r^ zFYcpsxaw*0a}NYtjMeg{3qig2#aH{NgW(Z)QhJnWaAKBQZ;KuyK1FvVeD3+NC|by^ z0qPy4*Fq3axw;LRTH2x=Q!g9=;W$BvPuQ#a;ir1=2pb+Bm*#18Mq<@f2TwgL0+&kZ z5FO%3*vIM^x`_EjN~|shVfeV<7LxRD!N~1DkX$Qa5%TB*gkSD#qrep`#I!?Uqvo>$ zPp@9RA984%ebr0g!8?oEJTVVmtPOomeHq#P<5Idq_ zLWWQfAx^WCaAJQEr*2GAWQu!wKm(Y(L~ggto8z+g&X2}`IQJ4{%Cu3gce*?xeFKu0g>c;3#4|HA>vgNHu!CqM19 z`FVxqH9^)`@AFWP_dlT50sVY2OdEW40!ci3&dD?x<0un8W_}9G ztj%p{mB;Moyun~Kc!cn5iV5T719hBhilpx(v)$vdX?=r98uGwijvOM3e)>-EYgapwG2AvOJM=5~}A~mUhm%A8A z-Y7L&EUC}oj4g%XSgXz-IU}I!IL3Z@?&ncqsQ(WrNmBN}9HX^;PtDj+m~`dtZ3q_a z&o{6}HXZRx`68R%MT@%3NQ zTlRZs*fMpY;8&ZmcXGQJ62b*(jAnQRH@6sWm&atWtb29#Sq>FY+-`t29U~i>?eR~E z48CWy&Jy*Z=x=6KCh`T?JE7olctZYhFajFIOuN~y0@)rteCIPfTy#3s$QgVCVF`5vwAYE_Du_%>?@NuOHiHz5 z)b)rg7AeT1=^~myAe_7(O!8x-`p*vJPWcU1wLvpY{gPXPX8RoEkGE@-q8J;Q3B{OI(P+O#m^;;f>P~5B` zgc1y|SM`{a?@>9(MB3Q*H_Cba*=nE|mDh8p#`7eBDh1cW;x#ru{N*u}v6BJmFU>O; zaVYu@2t`kSCZ_&20Wgjn9v$)Q(Oby*^mAQkeD?2a8ll`h8QVBRlE-&&ci*gKoYrqA z*Xo=G!YMY6(4VJZwj8HaD1 z_>+gnWU)-O#8j~YL9~KI+*Ehh$<|2s(YCfc4-ec=-sx@+%{n%a60{XwjhL^?>!|sK!CD`DBaE6Xi__`{cHyk->ZPnd$#lME(>M=2 zB`G3LMh`tw9qmGM zS%IEO(V;bwO3;c-^1+NPhoJMEYrpFFd|PWr*$_)3ufpiZ8X~`@4!kaRAGA>ci`689 zxxZ!<-nVPR1j(~ocfVH?O%tJ=UWwJ6DG6-WU!a2LAo?*mviw<;Hr>!iqC#J7Dd_2! 
zS9a-RgObk&LYsk}G+xG1KC2n*$!&JVm$C5Qn+N}r7i&7kSer$}TnZi6&S*$sJ#8gu5zO|vJI*Jx zm7|&5+#ly>g;v{*Pw92w_m3$7FjNOs`<#~{5N(r+9rFJ`2Zd`?C4-pqm{>=nytqBn z+HRIOMwM7>e*ovkuad9=hTPnM(xKlEqJ2HZid2i--#f!&4KoLU{^w~-Yd-8~v91S6J-4>Fhc{oPQZWwN6TO+zCI3atW>q72s z+%7CnK4$?~d6oy06gx)ih4@-nA90{TcwY2z-UYb`JEn}KS?A%i-3c<^=5*@n?R#kB?K(XujU zQK%|efB8UTmmK5*KX@i{x%(0%7 zZJ*>7zgMe8icpDfld4!mO3nxJr8j-x@}6u6%Y8d?m&Lptsi8Dtaxnq{DvvUa_#{d(^}1jS6t zuGF-NY+Mu2XoJ76Gajn&!gHZ4@5r{_@Ut%Ey?0Aw%z`L`?KkZm~V@o&g*%LNOh z%=_SX4xG`;hl7kIcs(=B-kW4cJ#<@xwi(pMIrmw72_8sgr1G)5?gkrt6H(G^8>8`phh=yNFpd2H(>hRx@J|CxBDwY^E}yVQ@;^8GSXP<=*$hganml#gcshj zz2}dF>v) zp(%mHqVCxqv__SMTSJ=iF$G-q2Umr9fHBZUG*leR7UU=>*R~ce?p4iK-jdX*39?5- z>*>AMhNtbzpn z{{{T-0s%r)s3A@WG5#OA7z7B>#X7?Z;oRDUI$c=CC`wZhX<(LfCEWmKslm=fT>F{i z_(q)T0NVk7OBS3gX=07oy-0hQqXLc?Z}-OqmtIX!z)eu_qenYOyGN7OG%x4leKvBe8Thj zvcapR%^)K^3^_1)Y^98;f8csZ)6b%#&tRc_I1kEK)Q!9D0n{@`)X3z9n;(KMeVOuj z`2hUR&+cM`Y7+iN!-Ef_0_lwXp%w6iFINkya(#CZ!i=M}j4@iX9Nq2*P9AHjg*P=1 zCb)fx7DM(rcBjgCTmgyK#*j~;SB7E)uS-3et&^V<2D?l`IiM6Y(WTAt;;PTW|3&%8{>~8YfrLR2UgBW$+kzL6yOFO<=zbS3X5+&ON;XdK~ zuO6!1EIV*IQ_86F67LDZ{+Nb)`(Ze*`*3Yd1+_}<`e{)^^{n(fN?f4qzW;h7@BAMC zZ9tO0tnat5NeGme0(DTWj9W;n!#mh47OvY(P)FKtAI!7<8!x$X7gFNTm?A{y)DxSs z5EG>0K;6yJ(Si!8TadExS2thL_@sxtQ8cF}@LYNIpB6N^p#hgR;JCx@VgPgwLk13JB zhy{bO*R5%kV32HP4)OPyFFjf*+28cM-LoWrKMvvrwJaX_NDX1WA)JzFPL3)Xdy{Hp z3xAN8^hyJ_O4i8y?%fVtH-n7-SRWXT6Rr~P^qzScoNa}-d zsc<++bEzz1-r1_k;LL(k(G3!>m2180M8eyZc+{p-tn?wu=Yxd4Pf8Mwx_e`kLAP|s zH8$6tM!K1TjJIDB*iN&^^g9L2aXxy35hze+;9){^^U{QRK9x)ELHS&BL8P!>PYp>? 
zfzKmgd2v6Cp)T?w@#sK?rdg7iTjoZQmYI1G4$tATfIgV_SL}0}^vi%*!s5`D8F^x= zZQfx(5i#)wYznugD-xg~00?sU>2FG+f8mnIx&;J?^QFI*Kn;;IC(oMI_e^PrYeQd< zS+GRQ4sy+SCM~of=6cA_Hz{H@`UjxcW4UrI%I(j!BXz72^~dw75<&4EE%-j7De8(c zu)%OKcyceIrNZrESV2NXg@6#I4Bs)uFUtf?dMSIk4tF6k&x*(mzM)j8n=oTU**ZfF zxAtebm#GNpB9a<~#BPa3%by@~;n z+LhH@?xiZHv;*1TecncQH!j`LI2L6PjAZjH2KMCVsQDQRzl{Vuc^Fi4=>Gr)dBmC_ z4iF0sj6akU%vhB{{{UU=`xVbjb1S0fM(d7VgW1s1nOM~}_35F;TOmVHCW0pfXJ*Gf^=Hg9(EUoshXhtww zE5Vwy;n2Fe;qo&YdhP&A)|XS+z|CZ*M^dM!lA-}!jQ;>`3}{brKtS4Ep7Fod^Sm^` zR%rFhx(7tJRsivzoC*OI8K2&;{F@wVJet!KjU>fFU~vcR!0G7y?ou?594PM>dq`?y zZ{(&SpfL#0WP*~{bnmlRk zLu)fbS{biyM9OZ&vp(uX>+sOZS>K^fIo$RPFZ1XcQA1J3*HG#X-@HEE1>2ZRAj4Us-2mkkO~f3qH}0v zydx$nd~E*!luB->R-W}NuuaB+MYu{L0(>~eXx|~EGQuxi!2qPP3~nW78`Hw6e@jZD z5KT*K>OP=|T0xKtXM&#q5J^?^%}2w$U$eJyCaoB2jMOL?Fb-Ud;)1|EW=0CrW3*R+ zIWa_tpm<3T@M0O&W;CTfUirVmA^H0F5*X@kYg&1@OX--CW^e#H!vtGYGe|{%pS&Ls zEF=cGA0oognk&5N>HCqS$Ew8%_RQdO+lMOK07^h-yN>0op;4zy*!hL72SPia3&fXZ zJP&n{LA{vWwwO;m(BxGvA?yb#kF9fIn@mEB3 z)I2o~dcYxVYMcx;8%kQ?@|h&XoBBwdzLSzkp}wF;c1RYxR6$e?_!kV)AsVE5WSWpv z$ZB8~RSH_b>NIeEm4rT7GZHEToWH$D(O2x+$l+z zl(je8GbxzuSh`O_#8d+u=c`X1Fh$bu+xHScpW0DZYEQ*8aykWLnC-jlhmq(xS+WtQ zT*1v(@c3PPk4^y7sZD%%5`TYYZxP0mJayMY{kb=1`SL9fDu zqcxtX`1$)|LTXG<)U073InlluBP2CUm&HXBBW-w`AYTp0l%jSJM_nZ5Lhk{&jtT5S z)VP%A(Zkr4qNBXkMkJpCFLiA(?(+Ml-4-E{iSLrd z??UQ~x~~QDnqcw#>VmW}#g&0#*(r@s|;}PAO8UMnpvX7Yq?lp8buYAKoba*1&ds+K6OI2DB%n>K;#RvNjgX| zve)?Nka~xY-ytAAQf+ypO!hR09>D-d!9fJmgnFZCMW@D~7F35vcEbI%Az6HfjkqvX z{q2~b#ARB2LUYjQ1S&B(*$6_279M)b`^T7{^7#dX%j1m4DGfoVJs&Tp=>Z*Qw}wcH zI*mc8bMh-nu@H@$IV<}7&J~LO%-l+H;uV``iTUro0I-}Y&)|GNgs#AY#H=k5tC4m| z=Oz)Vt4jnb{0(NgQte(WSTbTzojEImDFrut;f(2T=F;S39deEwmCx`r))5z2NjSzI z(9w+Np>dzA$sr3-0qvE@8@F)k{jW#*vvNL+PWl4s#(h>G2XX?N)Z zuSD>Zeq2mKkHwYNTC{bC;PWe#-tD@IKSSN8edqPy$@K}!aSZqJNqNV0a%7n3p)#l7Q?PwuxD3aSA$fgI z8up0&Tubo=m~mRm$~*(uB@j26?pMW7#t(?c6C(4TWb1X1^9evU84 zeSosw0hyg|04RBH9;K_yIHlFYLMo~hnto_Zewv1)yKXOo5)ICWk#Z=+v7biJV^I03yqztZbL5%<%8@vAiG+MPQaknLQr 
z6(lWs9_{N{GmEH{i1i`O-~sZ`@CAr*3GntdGAFAB)kV{KgyWI0qAyX?OTPG|Ukk!e z=V2{-JVUw5xN8^|{gqs*AfN++mP5xOKGU&=?R6Y$FQ6=@le=?G3$QAh8jaKWjCnGa zcRY8{L9Di4H^ndhG5DD;GevH)`!mJiBM(4N$UrvDnX!k7NtKLdS7X$0I=TEESa%bI z2z7TViuA^_cr&Vc-BdoYNN)6|f#3o161^YEy_XS;rG4Bvy%~q8+=wC$V)*Zj4vzsZ zE<6I$)I{>oEmfrI#xPE~FR z$i!FL0Rc^zr2t~)NsBHT(q5X;SqvYc12Q!CWH?xcu2Q^G&A>Uc`8(J(8;wbSWLr4g zbS#11oT&tCMA7s4D{p{Y+p4+RYkJ^-B!$F}T{7>vvv7hb^I${#mf^8x3T8^i3@$ZD zm+f+zMOiLOO-CS#cq`*7a=FLbVx>!G{U&nzH7Pt6tn@FRmNm#GU55vpGGo#V?nXZ; zg1AiD!M3pJWb{b;S47>`52GWm zmTofW^*IfMf`dO9TuYO~4LRCmJaCEzAs$d#PCAJTuMmArYC?3OX+)h3MW>bII*0=V zNuC8Dp5d3AB;*P!n%-uFy_bCOGhpY`>PA_PqxKb3#0Rs7jIbfr%s|gyhDVR(RPLWC z!9E5e$<90M47&I-H_0yZIf4Yg4-4o69tov!svo4WWN%O>ZLgGATT0IBRkqCOkQRsG zp?bi?-o2?jzOjb-lQb!P7)QGE{UZV*=Rqrw{RxJ!vZY%Ui_<{jh(LI5`AJ7bPsyj3 zsBE3OM3bCtNh1hg1oTa&wD}#zGKz1q1t+k|HWGa!(bPeT9f}7BD-4ZrZrUK~H7SZb z3BuN{6?)jmQr#cH$f1*MOQ+5;y+Ukl$+Qcg~M{K;OkvK2q^m+hz@} zu6vX@B8P2__a#cq!JM@#ZUqiD6-L5olmIo)gJSk#6bpotH`3ePaAfOCLN90x_v3;G z;t0)*XNpZ5b_*5pT0m^%LO>TYqTSF-TS%9*RZ>(&*awWQ+?$_!bOI3|Xcj~TNE)?( z9P<3L)^K=^H!5|0r3)G%v^o}Lp=}&gpF6U1oa8bm+ESTnDF(Z){P49)IBuYs=MP#T z)7YtS!F>byzMGzv;`tLGi+M?8q0S5;D^>0Yp5#V(M*jfq%u45RKp4DE@FfSBg_v>1 z+^A&@f$b2uBO&c7%IBU3l(=$&zta+{zpq2g|TjU${0=}s39Y* za(iJ!rYwb^M2lr>D|`y+txfT-#j(2$-;R{of|O?Lsds~l#ev8wbNLv<2$?$;zEQa| z8w9SK$Fu9|FW3f}4^ZP4EV&qf0^$*TI;7GZhj}iQoB8?6W-G7&JSxiy% zOEL?DF30c;n{sw%gm5+QmpiS`tyIowR8uxitIO#D#n|gI^&>V& z=fN~sB(h3O$RfCepd_j)$r|+qIxuw=$vhA;J3H~DUNfr^hp%<2XQT|gXoDt-6%K(w z?md%&@yZZu0IqCOn^?WOs}C{P^`a)m1B)H82!zUDa2@B<3j8|h8cXE23#g8rh!69| zvF93I60%&eSwvmPhg zxGt#(=(hzyRIWO9S?%S1;%KIxCzs(-vsk@=8v9loY7Hy}F+vpSO{L)mWWrs<2~x6RM!NGEAgH406FZO^IR5}gkFV3u$HGx55INzs zX7{Voi-=Gs^D9mV8+T}9DSXBL61iu3N3~II+^M29UJZ4n5>LeMu49FDguLX_|R@cqN#xlknP3vVu3O7T6 z1YdBa6u*t2mGAG~$dWJ|b||2b22h#Ka&xWnB>MIG0Xt+8-oK?Ui9!1xLt_*td={&B zrx$sL>Z`0d0&_TKDgojw*kn(fK1^+K+}GFm1Bg z2mRvgiCjTR3Snm< zjxU_DH28}gxIkYALYWHzoFXj|=T~{=^+Z-WgA)ZIZ{o8{be|= z*d>h(Heit^W6#{(qbWz&7t{2kR(f`JqV|#qR9V-xGszEfS`dw9xC}=8#|a7?F 
zLWon^-QSf?5!Az=ok}*(b!rXxiG>kt;28CMJi|49Yk@Wf2A@quASSxPEBI=^?qk*k z%UG9YqiG38J7LNMO%XHY-t{SU<=y};$k3{JB}8ul1BxgLH^iK#;^DMOil!jrh!7ot za)+7Q=KiE%L*HM-0`{TWQqces1jQijjG=%8<#Fd%Fzv?JncJrPMMlS*?I${V{9{%| zm*pke_98$$d176_GX0t#F~95#YF!sq*nd`Z!|Y?9clUD>o^R~cVa zDa(0DZsI;&wgv7H&gzH7sy>6tBoVhl;x#OV3V6=mcYMM^xiUX|+ljdYLBO1Xzu=sn zz9>SUvS5ssO<$~kf5NE51EezSgI1Qds6y>!>o{83i z%}!mF)1*gaJ(8ju`b+eTQ|*Gi_QOGNN|_~bMh|L5B8hNZ9e>pa^1e_= zX(iZDYsW4O;bNYy1Fpm7(I{_ffy(SANH2(e##V6#np}&wn<4<7@+mKxx?-}qAckuj zMJC=D!CZ!f(JgynV$UqW+-<|gR&E+RB=N8eONBm>yaOrVVCHa(zDl!R=UeSxho$)F z^M4#`Hy5UGh#?;)0ywO!%nH3}Yjvt(HR#zeTxnHMV&dR%OVgJvBadkKOZRi~BzDR= zPj&zgJ!@^a`LHk}xqed@?a3H(h9$RRks{UiZn*@FkmVtKGt&@-W-OdyD_C5@Yb86U zoBLUkvkgG$cn7zjjceTY<4k0viYS$yxZt|dqEQS64nqj@@B_FCvIQIkLMw}tu-{fF z1Hx9s8LLoexCjzN=K$0qcs)|7F^e7=0OSVZ{ zKx;ZTTNxs~#C~ zMn=qBy#tAS7U=t=DE0`RL?Yw(&fd*|okR&P&6!h+H#w&8qdzX} zMV!l=SK)Up6)Z5432StmNMU-*SP#`?6(Y9Q0|?v`6V#<&Yg>?!6?XzFR??J?VSPN0 zzxQ_K*GWuR1&|gq_6Gyq25l)@qew!bV4d*^XieMR(ZBBvx(x1+10`5)-2$vOAlBy$Sm z+R}1VT?96|12A_?fP}8*{ks0Qq8rI$6-{-4t3pa{cvNz38*oE0vso0hY$0xL#W5jM zDG^Gh!+C}O0CP11ljHR5whGuW2oBN9H#~*=(}Bmp08?kDs!QnbYc)B(olJ!duTJF^ zWM)f*Hb?G*{E#BWufZ%V>1&bzHb22*+^#N+V2h4-0of-Rs5||Dcx1INhW4>C+&7Wm zS$wx=*K1eEIWk)DIVTTYN(Wdq z(asDD_`KJGp!YWs+L$NL$k%?dN8bKM2Ad79C*c18j0CWCoWaV1Rnjrb)sKx^HL8x6 z!jCERVl(Kp`(wY3YLeso+pb|zWZuh_y z(YZn8JssrpayXC1<2tazppH(~!pj#2O>Q@b5;;b~Vq zhI?zEv^Cxgb*Aly-I3I_iTEEEs7y+yGN!Jr=0niOxQZ#BHNT5w;Q~~lixW&AA)&@Q zj>{@v&yj1l$(qM{8qa7@>mD}Ie9`aV-Ej@UOt~R#Y@Oq5qOK}p!*_-p&7=a*hIW&I-=4A)QWKW( zrhA_BmWvDj0En_YAwbSk7gxNKeaWgfgitxuhyy&5ApJUf%H0YQ-mn};S9pu7;1ck3{cgqDPED=ve@R==y0Ao~g$FL;^x~}Pte0iu= zK==pgXP?JvmN9G_m(7!kSb&1<&kTx_GrCct`QZY!fhIUCuE*58#^ zKV02&0q+Pyw$Dml+RkPrCL-#fs`MZs0F2Hu^X3qhWl~;s17wpHEEXMhA;?Pf?uC_c z&2AWZGf)fqghD z5fLS6SM#~^P0VAZ*)xvABGZ9Qw|J<`-<$gFC>5Sz_n>(|Ch{wKC3iBT(`ttSu#Qtq zZklXwgV96j`p~ROx^}hdjg70I>gjo#`7)%}ZpX!vB{o#)5!zV-ZV@c>jgH_#A9#LRez#Mo6Bt-=GYUylXZJQAT30ycdG-51$uTx5@PWT>)gP! 
zR|0u6ulW4%utXMEwR(9-Zq?}dLmB5VgXvletk|Rv*n81VR=G*DaeT(4Yg)OPf0NzI z(in#BPssS6lA;g|fkahdcd}`hB~k;N_y&l{1PLXmRpxO20CFZaG9WmqQJu4A8ihl0 zOLWQ>-9e;W_8+T}VOugMc3=NZ>-`)Nrd|d>x_% ziVZW1@NzKZy#Q3Zxx!SlIm~mj#WYC12k!H*-BCXgL-tAnyn+c2&{({BBR-S_H7-{R zpm}0%=9ptwsqW2N2EahkFTB?(q-LgTq40T|`X*Y;k_`+*rx1I@a=wv}<`1@{RgmiR zvs8W}{-vSO^IQ)IS=7}xC_uR6MAW!=BTT94X*CX6;Fiu;K4iuYgvA1Ag->QU53Va7 z@VcK$L00d;vTM183Er^~7yc2j-O+wwC4k72bqyi+mScUWO&?N~rz$y=OAo!8d8V~F zHq-|tB!Ugwt*j9II3*Jt$zBhpXD1bE)N;#5XFp}iSy*gp$K{Ta^>xgYxUv> z#ZAq1+;&H9ksvT>W$tAz(G zb>ghVqjkE{ioxC_B7w(tce_x8xusPU39OdTkmisfQS*vE|YWvEPkrUn?ut(`UN~HQj-#L?Vchf(k#cx)o&W zt%f4L=M;9(SK9~%51$s`o5Jijt4z+2LJUi7X2iY$6w?!j32IW3+jnjG=V_uNkLkws&0n+2QAN$$V01@qPv47Ji> z8KXZp)OMVwdZey~L2g!APdZ?B8jO%~zXS(gb9fylXxT+x9|s1Tet?XC%`eazPb$U) z@`r=;UP~%Z z>5SyxrW4v>`{U%L6xOA^^iZH2l9}^gA*(LSzg$o%a z9Df*vXBqioA+<<5SutZ#bY@brq+`PLD|KNGO`#dim>V6`DNC zF-2hFw4+86?T%1aU&}pzt>=}#$j{^;PB&XD1l0g~gxtLnCxNS3kkOVw-$VfgmrXxJ zVo_y|NuhlpG9|q_ePeOPvU4jb!AGK0$>HI*@PLHAZt6$K^~Lyoq1)g-agR}!>Z}ew z-2jJ!)P|?FTb^4U9!QO47Z)-r93=BZdzN&Cozr|z5N7iO;MqT8J<2R01wO&p zxv7^9ZSOO^=Ab#0ft{bghX|nxXpf0x@4f}j{T;>*UoB-VYziVD z7HLoM#Gh&n8eJzAnM=bMO`|x(Okc}mL&%P{h@CBVAXUpcOdHfj{8L3?l6+v4Oxl@K zK%L%aYZ(4b7b)IDSh@cIW$&jTR+*hY9(Qk{qxtDZW0tsRF;5~r&yf%X8ZhnkrFT51 z0sV*D^LY%2{8zwxzYCK>nL^}AW!2Nha zAzcdzr8?5jhB3|)4Xb9Ec#&QUN7JNHeCRRZ?qwBt~TTf)XK4a8-V^T0d%O1O0znW)(2UQ-=zEc?ctD z66F2@&qwAdH!DE2kk+|k)uuDRDPrRzaIx7`cn0;Z_%~G09r}nV2$#7glQfstTX6N^ z^AfMV3y_IU4c!?D9pE{^J`L1P=(#5Jbw_MkXC>#FOqu^f?}qK z7Lt%sJ_kT ziMYXC7#PLd>bcL@<$(5uI3?1_RLXWjx?Wr$T&38_Itwc9QDRM0XUGeU1H(!av^eiN zfJOIp0mEmh9)fK&X)zcl_zeYYF~;m6iXk(8Lo28$R76ch{{UTjIs#ylR$c-^dR{7S zDCRFbN}gT#u|3V#o?P7p($XW zdoE$`4&1$I?~9uKbKn*@Rh`!g-~jGPuI4!ICwc}fof)#Attl-_y8PO=@>~*$f7S2^ zfcZ|1cUJGL5?BEAF)Hr0P-0W;uU}ylgHr=I)2P7-B=jtwRkf&bdPJiI%(vx~DVdqy zUest>3P}}I-0Ozr8jk^uQeZZN_)^yQV|ZQ26~;NPn`8*Ak%>ptDv)+RQGaPuER7y| zXD5YY$meZb^6aJhwj-3*r4jCCI!OT>I%;}l@Q;diWk@)kLTq37C$r>~Zlz6ZHP_qN zXjX1P*Lml1JEm*OU^zlRR!BMWTwmZ==-gmf#D)Ff-_b^SVP 
z%o@#l4f>>HGd;LT{t^A(WRrz^e*Idh?EH ztO%L_^9Gz0szo zrQAuOg=BK}cts&UEtUSj7hC*?L-R2VL zj!0%S-rH?(iykD@Tkf*&-@KzN$Gz;rd`z}=B9mdJ;`$wTlEUd4hS4hy_~59#=6xu= zb0xBq6>6=dK?X=RnnJBpDsD&0Y#90@1{|A7oKwmj3`fk&vLx zn2_v1yj*NjqcF77NRw!o(h%3It4h&BtH)cb-CQ%txlOB$MyiUX^rNK!H; z{711`o2LZ=)N>~+RXo0?*r@tq-llHiGAy{9F5sGpleN2obLQ3M>YzEy$*ngh$@l{+ zJx&)rV4wXTK<^VE^qW&6m75l9-Yz9rxQ%Q|!~q5;cu&Vu6uF-g1li;Ya=OYcP)>qq z@2{4!7Vfc(?E6nJ&UJ}j%v_5x@GW73Gnc28{mBpj#cGGgDr6-VT70ft9( z#cG)y7rP0385sIb$12)*h7BFr5)*tk$V6Z>jc1~CjJe0AiqgA4*1+2(>g%BpC0U{& z#RM*$eV1L{kEl`g47lULQrcmMLsp^fQ1b>(EWRf!G0(CzvDRVF6VBt-f!}90fWg9P zK4gGk+xnWx!{Z7s*AwfEjv1p#VsfcU_vUBhmn(2pw_n7{a`$kGjNIOp&{tz@gO(w` zX-T0|hMN(7Bw(5r3|Sx}eq$4RT+mUT)MIVHxtilsNuw8{pXyrNDvBxu8Qhxf8E2ae zZZ5bB{#x^ab^+Q*^JEV!FHjOtc&b#&S-0fFr;|Ir9SPR14}i$*%K+-m;>-=L2Fqj( z(C7=_1+$JzjQ0WY7Pc%IJ%>QlY6vJu4JiPaFh+7qfQrMsUOm|jFJ}<&@mV-aGmMSF zkKe48AL-?k)fQ4G%#I5G#&a>`uDQFL9l zx4RO=N^D4^`Jch?MdXh-JU2FG?6w7fSHprZVqhsMdfDd9JchloF_M)u006z~= z-L9!xO3#Htx+OcZBmvJMf?QQPfRfS%iRW53%eebCRNUMY`&$K*S2%6}Ra}rfN+V*# z1YGVXe91xKDJDNxM<-y-RMS2-HkWzt9g!!8Iw%H?eD=Gd_~b;^-EEnNgV=GBF7@({ zcZKDe@hTdXQ^Jjn=)=A%TPUKgZ#g)MN@6!%#9aJ`v!B4=;fA#m&VOEl)}F+(xH>?Y z9k*2A8am}Zhif>7{v^}vBn_%;7_M&jsDVH`R>tq%6rw!au5dDN=WpSwd&c$nT!w*< z+Pn1&uwd39g(pyzYoHQjS48(s8(38Y5RfLF4NUPd&YntgZ^}QuRHyUM!=P=3O!V@! 
z(=2Lk3Tc5C_^li#?#OjBP zTR(E>0m@H{0wp86xOOO#^gW$P4+4Bc6$Ey9bXow$TmYL@gTqb{jKlzH3Ox7CRG9C9 zzX`*7+*K2OSeUsRY-9^z1~Toso7}a)WBQ57k5Ozac#u4FshiH{9j%NDV~@ZXe|DAt ztB|EDs8#-h5#YeYXdGOS@MKrH4h~XJwjdUj8gnk9?wx1{$=WT-;;>?v&y3G@Mm6Z( zpO8a`X7-yoru_5QNG1EE`6vPM z?(Q$Wso6srfrZfH!G_br94wo@ zm&IhaO^UKzTSBy>z2)gt?E0`oMPsqB2X{kNoI)C@y)7Tfu;Qx89pk$57b8K^hHtmYsH0*4$>($XDVL zyBnX`*zRS+WVagUW^1dTu~2Ezd+g#6;)Q%|cq`}pI;U~dJTkz^EcpgK~``rxT3#LV`Kb-C^uMk+6*Am*ipk2RJqQ_3Kt1nm~av_tsIR#WH#BDz$2+K;JXZ%1R4t za!(DE6DDk|>Ef~BD7v)`n?7};j%NgkmUjU7Ko>J1qXf&SJKjz^XjvBnv**-rT9F~| zc;76YTe)c2yqA9?uTstDcUxE{n)d zqmh6Fs7(CHb0)fNAYuvKbIf4sGS@fkWkCl&xFQNr6bDFMC`ZZ~lJ3cY>Vg%!7NUP# z_;$Si0MMl=s9sJO(F9zqEFGf(<>kyoKSOO>fyWR^a&M}oM!>Ch5MbLXt!AwF0}<$n ze|v$$TgeC=AnG!I)DTwyVOkX3OcYy}{)HUNH^UL}6D?ihH#=79YMCsP8D?BB#*CC1 zL*2#?>wZ8y=}t)beB9Q>rcp)k%E*&%E>pxI7;dcHScZ=9+X839l2znwl+`Dix?55U zo@fCNA9$`BPr1UltmFt}&vid!1JVn?RL3Y>}W zBOaL#bI3ua#D=T|=4>(vrzFP`8S=3^v|hFoAHEw{EJ47a)Y$>NoSJxFmD zV-EDr>+-GskwhtpU|{?QRG(h2P~B73Hv+_zP{-Im2zpI3K1Sn)Ero@v0}L{~13_sb~F6PJeZI`L$h$t3kwzjjM);L}nVT>D2nv zS@V*n1#?w*Bpb=-JNN23F*8e*@_7fnL?v4_Uh_xR6#^nOs2o~tY{UV8+7_T%e4%^i zp@F44VL5Y=;43}P+i!mSQp5bksRKvC6YzRK)>q;3GTG^Y!tpuzKFY6QDyc9MfVKTT zZL7$9Ri)uJKv19&3s!)5&tu}i({W2*u~{WjHwGTBcJALFuEm{073Q&phHsqoi6WA5 zo!+{LPukdTmm~;Sgo2Co`e=Zff^0`l=<)VarISF)?~{V2`UHefSi^F?AsE6i=)X{~ z=HG2(`1Xz15c#)|t6i@R2>_Y)P-CLU%H1t2FR)T|rdkn8!>qPOTSfRd`l{luC` z0~I8h6&xHI@lhd3Kp_SE3g0UZ_d}5vMW_bC>tM8T8gkALD@kGuRo$c6c7UoZZZgyE zF@wg<7XW3iT4Zgf*D&)ievdO17C_|6^Q@CtX2y8s(8_AU6ntOtLP2L@9xS+uj!=DV zrQ88CbS58gyYc$8ed~e5N>CZU^ThG{u{&x_v6x$NKZsJVp9Vi*ZF~}NhVCNK=rNW0 zIe?Fa9!41vqW+9#&QyO-nhBZhy0ZnN)1*A>XX;P%-GcNnpz|P%3pdg?pu>p@bI7cb*OK^*`2PS}>_IIeFCGud ze}=yh-C`d5_ObqYP`>9L>&J;2neWJs$X z_M^?jP6(SpkAN>Pml9C`Fu?M_El~K|Od(>N$eJ8&EAFp@;k|qnVdNz<3Fm#~6pNsU z_+WWHl&PV6Mm@_n+oUykgeV+`m#;x^={&SD@7{`tmy>3uvMD}vy(wRQ+|`K-0S#Y2 z?h&7fkoo&K!D}JZYg{ZIc~wBAXrHhv{xlrx#ljEqnp8QOuFw-|AZKMcP$oEeVzoQj z3n@Igp+8Z9GM?r!`X-Lr80$fb)xBd=*dKFaR45*bXUQ+1JM1=<3fvf8i0Z^aXzRGe 
za{R51MVeuy;#>%I20)C5j2-7boDjVl+Dy;Boy3T4o~nulXZAC`0;rf)LDm?g5V`(H z*f$8B0T!OLgtZMcU{qBL+ZJzv*2H9K=yW0EMBcHAc1o4T_75r`F0M`P*Jtb<1Jq4f zkBH$}f1RY6#K*>wO2{g|I1UEB%K|Cok&l0YCC7x)vhX#SqKgulyLjsKHk*klVS-`N z;-4}{>p=)$R*k4)&TywTj)bW=HrZwLZxyJ@%k!eJYdlb}m|F~;T`tfI5X%&vb?)!2 zI(f5eFXFA^ADHN3Iukmy=F(Sv3W2Q81RD@^|Y z?V?e8>6=T?HeSNTO-BuwzS%sRjx#WoBjyZp#pot;zht-?>Sp9|`*Q0}zSfz5!xoq` zF&W;YCs{r%0O)29IU*(%c=(o@o>LC7{W3M+;Az&?W}^P3^9@>?b5qnz`YB-m4EA8{ zY`_!imcbN+jZatp*Uy@V&EIiwzO2AK#E6asx5UzLkPbG;jMi~9k`|jFh@Sv$qX*C) z`)p)94W5K$))$P_COx6iP7ucUwFCY@Wh^Ma&=>Qf9r-5Kqq-HlAJ|J5- zNmN55R!$xhWMZ}sJyZZl8O04=Sm48g$=Ie2gsaD;dRRgT^!2eFIY`X0l}cS|n1f)+ zrTX}^l*OjKnwl~z>RId7E*kL-S&oR3XAMnzpDUszF9FfEs;9m^4+zLyg)fwC&;eJP z9T9DJbsZpISqa9M-__GKuMC`RXVjC3&Vxxg1J~ zb75;u1{tBKxm8)+ag_5_Vz$f+hQ&6lB~wKQ!p;_nRT)y6kOiZ15mP$v^Ab2VST&M@nhv(h}&T;r;=QAd;-o1Zacqz z00lRXVjixPAPZ}^qBku75=%zN0~M}LOIFa5U;hB(;w{Uylf1${K#Z<4?_?nlvJ<=z zO^ub`PUPxedHdBmDKW2+i;J{eDAWy=DT~cLp&wpF@8~w=?A`@sxa5`FS1tFCnKu6b zbFe#GekAa{lYw;V_~IL-wPlJx=O8{~F$A)Rxw00HB|JKiFt&?jbc zF-5)kAcXb(G$_(QSO9YAv-2;G(G8Ptt)2>yArRU8K_mG0QG^J>3rHcJs+EAdS zYycT8UWs%Z64j{HqQ~Z;oK4=ysw^69>$WV@Ozl# zasp)YOunT~1lPiz^#b|zfz7LXnN1LO<4>^?B->hkPdV-j z+n2yY+EW2Cn_a1$i(Rol{KaoJDEr$bWKXu!nx*8 zvWgIibKZ<%!#>tx4*+dIlD}>u^@LeAFksZ9kgR<=!NIZ(DJOWrfRV1^Q8YvCR9J%& z!MQq#Ai^9uL0HxVun#eiwHgJ2Hy~*Zm`(|s1MGvue?)Q>g-Ypc2#EX-QHw#(TYxQf zzj4D7pShLbCK6UpQXJe^*!b>PqEQJVP&_t6p#hcP)UlgrgpTvg878vuz;_bi^l1wc z3WYk ztLeiUohs%a?JB8VeBCOb2&DrZW-Rt&fFWrV)+EjGWgKOBkfTaTF!dbG1{=r~2Bh+P zpdA4>%Mn+6X;1-uhsXK-NB0VaCB32XGe9Di88gcpn?HNCFpkXl_Jr1KD`K#~7m~v| zZX9wT4G^BZ2|(h^Fc}R=vWK&ULI}r^?RR#RbX&(PGzj^+g+gOUdLRU#&m7uj3xnN# z$Z-*P%}E>iXUsg?W`+p!LLLM%u9#G#%#bLrD_t+T5X-msRNhRm4)GR!aR4cSt%T~q zO;{6KCw34a&Q=AyHG{1yydmX(`m*6#T%S~~J1KK#ITj>Mp=N1)g1=C{KPrrPN_3iF z66KA&B%5GO-UEtl^K}i*y}4N@KZE1}y$1S|VTHsYv1Py_#iWZ+BIj$59z}@4N!(_* z2}F60>ZS2M6qxn&=1Jpy1idpdf(mlKKB8)G_#(pK)g_eF!lY=OVV{3J^aS<23`OCY zgyvXmnP=s_-JB%P3Z&jKQs`miArbkDW|yL&5D_f1=xQzXVn84qQLpsRVDYZp%|>7! 
z32M#SFk7t>XwMI5;n=&tP=HEc+|2=Pu?gL%^8mlTlm)_2s9Jli zE|sU#C$S5L8t_f9X^6y4B{!VD-gg{yPDfUC5nO9KM*PS8Y4!s72UKtn1t_PTWRef~ zm)AoXO<12p*2qz|b3ny;Ok1R~+pa;uU<*;+9&a%E_DH2`#;?GA(rz|T=`P0hdMEi6 zrOF!r;ydNlR>pZkvQy94TqLp9E{|I0UT^g-{NUd5tpbwe{I)8*447&yT=YIb9I!gQwG1 zQj0w(U8ib$f}c$UM{noZzV?$F+vnb*1(9khecqr~(xt$b>|8-^Rf06s@1tgm|Qau+1#Ubu%X-nZ6 zUDQMzf*y!mDC}P~bZ&e4PRD|2Wu`JBYFRHt*_%b-*Y<-A1V!j$ib;uNt8N1*jX9dG zI~OxI5gbBW^qOx0I+su^jeZ1WWkYHZY}T68;XO!&XeEVHjcxc;Y%3dYAfn;5Owob0 z{XzicCSg5&vJ&Yh;3Rtlsf!aNWJ73jWcd<=N>s5vhX!DZ`H!6s5ML2h<_5n_qfaBP zoH~g^MkaXM^?y6-fvQ_OoqjH-YMz)1G+qo8gd_*zNvq=AAS_gC$bk+)R)|c1UQsY5 z6r4*$n=vpQ6BD5o4Tunc7*wt%(e0!q_#+Ak*2yC2$+J|HLGaG>}aYj|jaX(c)>Y)kJ>}4Cx zqrq61nmH;NTFB5|;!0)>Bxdl9cL^5>CIP>{s06A%KV*?7aPqbtPZ1arP+v;Jp)vk( z{gB){Y&OK8q&3cCQ8*)Xo=s^v?bHwBf)e(2fa~%jTFaYYS9T1=a>j#48BX? z3bZqEe>>m;jAJz=O+bAZTsG+LYYAsmt4*IQ5dZ)Qj@q zkM$E#q!Ow-NA8W?p$OEqjb~RSU_0%KUL;t^YNu_@%1Gk2pQSrh;&seRz6&%i2c?~7 ztvAWEiXD`cNiOO{S89jM=bh1frS-te8hL+hv8^Z(AA1m+Z7&?A8KOUs$izqBuOt|B z^6_T1a^+XGH2i}+@nWOfRM)gskCZOLB}{ar{%U^l7&xJpNv?#!Dx+03JzuQBY&crY zw0AC|wW4A^Hdx14>R0LqhLPBi!7x-|DK{Q8jUXX%BuH6RXY$d^;B{O=_E$;*2~77j z(lX&43PASrOnYIRWqPQ>_1!BVY1VXZzIRRUB?Yv*GQb)T6FFLKrB-ye_9vi?6$Dhd zJZ%8Jq5I$7j)sy~`_UnOtiALCkpBR{sw3wkGiXs!HE33Dp#gR<8b|xlu;jhF6zBSK zp;Femgp$>`3m*`5{Uu#0z(>#ziWexN;QHZ}{{Uokk~c90n4(Vz@eq0~daZ&jd|4DI zq?Nqf-7!eQepvHV5hU}k%9@b*7}h(^Hou^S@;vEU$>8Sug1C@Pehd9jENk)7c9X(@ zP_szdM}wkMIX|JCq)H?f!cC~cuom#l7XrAcv686AK!zyUk31n|iU2Exk0Z>&%SYin zNKU3rbrgPJ#sQLO%&zq(Br?;ru_j~fOGWi3G38=pS0PkPzAT%SnB5a-3kFyCrp5f? 
zhc41&i{LQkxU|Q;d^e!$cd>_>8+>7@D~yThJ-`D-SCF-@o3rIf+n{o}-$g!W=%qm$ zwat*`eh;mDh`i3X?g3mH0p3;0?X<@_a}z~Qg?3paB$|De?Ft8?93i2%v6(#!tXXPt zG=~j=Ti?@DqL9?=mJLiBk+5YrF-IQ!VwtNAe_k(<&wii!Um9y14C6olh z%ne*Wd4dPNQY9*i2QY%IPQcKU7x%`qLXv@jEMbD&Ov@MJe!^a2H0&xe$P1YK1ab7- z)%$J{e%zka!t^1~4pDUb1?GwoU01FIL2zj-8E^XgazhWIJh$S>gZ~jJ3GP_kV>&4hs6)nJf>O_we-N(`fe6r-L%!61}VQFx;kc)!Y z;(?TxVlDYoU!3vilsMun*aaA{VpOKu($OqXq7<>|{pPUQu$@bFW#cxtLMeJ4_O0$f5nmc|j|ttr{4DdM|cwH+6 zjjW!@c;t}cYCq3KDkE%_ERi+2(ChrP)#`p6f|6E4>CF2hEH+ub){@n*O%vg}K+x;VpKG;&wFyMjr-UR7VT zq$YmV5m&?~B^pz|$%325q*T39m|2I2!H9}M&=;2PbC&2v#pth7HNaL5Pzt`%Nm&Fc z<@jHjk(-K+XWA!>sc6T@?#%-M_3}+qTLAe_T1cAp4hNFz?L4D3VDxcGr9nH3?L|#C zA(15_JRx#~6{TZTG00V0+OUavQ5tVE0Ed;Fa_9G*<5fZBbs2=$7VWqTGTqgsncs4bqCUD z=6`7m?tBNV4Tb)!AjiJ7SNV1{e9>FG z$WEg$9$9`rcF!zoiS+DXzeGWJ!&i0g>915%^y@I^3$D7}%}KN8PNV{JWvErV~2LlZ#e zo35L*%WNm%TM-guqL;3^hap;D+$yv%Vd9b7A{4Jer>s3)16})N?J;G;J^5_e!YyOW^TBi|gVM#_nz|M?}ZkzNizLs5(QImjTybXG| z0g@-)$nLfkkhtIhhdd^4VeO<9(zd4~=qxW~PsMEr{{Sg*D|x}AFIVZ5{o@LkF^Y+4 z@~Oh8p(gjYh(G{xaCPx%ll8+LE1^7T;q=A2mCz3szc}_Os14}e$qg|TH?N2nkB57D z9L75>jVC1If|P)^Bx=Gab4lCj8QB@Q89gEZ)(g?yO4NUFiNO?KO^KvWaBot}{x?jV z4o{ewqu?fGP2yj(iwZQW1CG&_a<`>PRxykESHYvR9Yl!O2_Q#M70w!bV)lol2hC|? 
zAR(iiDr}u8X8;iI;#F|+s=ZEN7JBVzFM_#_AOS)#tpJpsFnoy%6tbl%LKlt)c=eel ze}W!g)6?=;uo#8lBQxsZD~^o^=UMX2Eg6640M=R)#^XkZ3)Kcv_tRDcjZbVVJiq_z@x5M>G*?#Q~m0}E#^Fnd>t zAG&kOO16FG5H-Wbr_3b;$A?&)3f1XyE70t7;TRdMAsV z6_^IKp#qNy*7X3MLJ%=t?re9D*f7dKb?FT>Wnw?!+yJXJkVuTeCNp|i=bwV8!J)>T z{=SnsYqqVi)!&t>?xp3j?*JAaU7wK&^g4%?@?KVUqThr@WeJ)kHDka;wugP3S5}~g z*}v+O!HPK-JpCPe!PyU6TOgai!Eg%Rmsf0sVWouZ0PoU^D{+^7$TAAejCtW4K=F6N z>M~lKJJ23(Y3iFPkgc}s>o@jN8QCisD2 z)9WQ=2Xo{8-Q}TOd@^38h8_`7SHG23%{=(cNt`kmfxNZ#*!Jis4_o zecNWkD(IL7a3-t;MsctZ!Ay&hmAk-w zWU^>AKRkt8XZ(91h15EhpqW$Sz}hRRYqoKi?0+b{MU*a9LzlK&pg7=dEN_qmcD>~j z)Kasi21@=uiR2||KMI^E>H0I}yMe7-X~{9_IE?D$>b!@ZzR5@yNn3bw?;u>Gj+`Qw zGcYdw1bbW0{YRqC#Key2HQ8o-6dcaOC5>*wM{=59!`V;Z06;$RLjk?m7=lj}_l+7& z4ikD=8j(J>?j3@oIxZM0e+`%NTTxQowjh!k>dy1e1LY@j@(@|N2$IV4-3p@&W!R4p z;gOIqCo?DQLTCWK$Stx2sDP0@7BcvI)gtd~;vXql5X+y1ednCa3J;kO@kpLct_qoe z6o%zZN8-SNkNWNpIZ=_%U|@lDAY+7F5RW!89-#HDpduKxl%Skt$*DY8^oGTdZ1fhE zBrvMN=!TWxhc3A5`I}Z%S`=VMk|IU9EOlI|Boku9c@HNMM#f7G>|Rp^tGX;83K%%> zbx`EYHd3d(OILGjf;!w+bHVepGNhAnkUPL8YT#j+b%kWl>_DQ80U=Tz1;dSxT0(|` zS{xObg!X9X4H~gMEwJ$V1-`x{KBV9|RBE?AtzUGtMr)39t~!e~(~Cn{&T0D$$#8IK&spF^@!Bu!?_J zdQnq2qb=``7o4%mpfyp+C*3l5bDH?!4+aRkNxN?GqdWfqY(5SV{{Z)Wg@8VxS;#Ul z1?jdY-#(N*X%R5cs+8^ES0L2l24JB*n4}cSdBt@kOJvY7fQ|6UKGYtS3C&YaT6FTj zMfZg0T#^LHG%(L|&Kz{o8BLvi{tM+{riU%5YbXv!G)+v_b<2jdq3U2O0b?On%Q~5W z>$78ZhF0ZG!DdQ-88qGz!6cRkRon|jh!MaP9f4!oR+nJf*(>I2ar%x7BSq0@$Dr2j z7A)jD(Urn@`-T-`jmX+q`fm(DjIfH;5v&;)P9Lz5RK&NVet4Nbes-#UGI5`@-b$kY zENpqDRSF&Mz$a>@q{hYQ9yTwfmm zJpd6SG!K?#ld~DPtP%EWHT~N36iZ-lhmr8+P}-C~-0Am2%fbBO3Vf?a94BL|&`YwP zH~TOJrL?i2s5nFOL}a4OeC7*eEs%Q%paM*W5l;5~{Kg@LC-dI`#uA0$XjVi9_v{w< zY0xJOeYr8Dla-yO(Rm$*m&2It*D(X=oE3z==stqBK-h4(Bw@#G0@P^Q)zkwZd@>Q9 z(qL{wZUmf~As$|G;MNkqlrsFiJUbuVkw~RyFvvt&`<a++(h!{OXuT z7#_2V7B&Q}I#fm6Z1BHH^vn7CIz5HV!#UA>Qt)Y5#gI&Q$g~#k7!YF%ggyTNI!$IR z<#Kc(w2^I5HX1}0Pd6S!RcuHDi~=XyU|@~w_R9Ku;faL}*oFl{*^f6OLL9eVR!LTt zDRtkC;la}=A_U}1n!Ju+wl~9r;sYB(UvV{^JdWAY%5MS zIb1t!RW`G}_|GH9=>=xhXi(K&tKf- 
zVp6GmWGyQ}Z-pukfI$$)m)#O!h97onJVxP)N`>aH1lm&RKRHRV2-JRr;l8zv`;@|V zZ`D+G%c1FVsKb~ObD?FPy^~+H!104*$&~PsVi;eC5^zAl5(-NOH(oXbWMZ!Optd+CoDQe6H=Io( zPjmPBoLGm*M0CQf>uFG>FufSu;WBpW9e|tjw^2wRKgBRbc$Wj4kDkTPV>i8OJpq)? zL=)3{r*>7Ha;MQH!yaT?`zeTJw08P!y@#@f$s#$1p44QCu2rsCxxp7#2d1coMC*+dq$=+A+jc{vYqTiJ=s8vI4rxj5{!s62SIlF~o3 zUD0N&C^|J~qruMUsI*l$+_F$!1|tQx5-Pl>8tIT#Y6p*&OrWC?-y$8#NzYY5m8G~_ zoX^;TD}=0hP=i9ripoT^=sKFftBqayz_<$>I#1qE2oMv#YOEemXs%k6VqswnnWt{+IH1*1{Qb}BQvA4Z`r@(N1X^53mmG+IH1Zj^w%ZY zNbL*qVu`DEA?|R}DHY^A_&9`>uK7a<>w^H31@}~FaCAm)9j0+AdDFG~qd-Zd`cQ@C z>Vk|Id{jy{eTGwSj(uEXNC)>ZS}$+HzA)2v%aeV6kG+F@5Z;tVQ?^YMyK+DU7vLlb zJi?=znn-%FI~imRF_4N6u<8da_)_O~>uVT@AYTCn`UPlRW)e~6M_VdkcRS8DNNoTS zuHpRjzG(p2%vf2_l$EfFh9as8vr$~2q!KK?;o24&Xsy|kS0^b3R z0f!Dic?qHa0D4U*!iq6%H&B~#A925)yr2c9XKu}Y#qTsWc((S#rQM4{^V2p6DJR~{ z6EM6+xJdIW2=P+`^NyL}B6Y$=<1}piQKbNQ8B#4m4elxBk*si;X5A1h0{nTQRh)HT zyV`z<0b^?SdHe6XuAZJ)Q526i@+Vq#HxlVKyoV=b~}nQxbG2azWy3 zT4UUFF%Adas33+nIH8P-*u0o0v|;`66vgfx$g@|d@G0oONj2=_LBo0=XituNl}4~K z6%(r+@GQWd=%6XZ-(kIjX)s!}2ZOKMKOc1qceh@g+Mlnb_jB^!Cy0Ju{vrd%BdHy7 zF;S`lrDwSzJAGPpxGTRF0o1~o2lNk+@3S=uA4h*sIg;XJ-H1XVH?;R>(EZTUw@^9k znxAUMW&XqWLep&IN1Z4E6;#kUKB6j$8LI=_k0&kmsJ}E)PK`SCc#mRTG3&2=J{gfp z$Ru}aVnT@AJbTMQk}6Oc2LpbF1J;2J)}66iA14qF;P6&0FABl`0Gi#s?h7JwVxakP zHm6=m0Goc&zVGvzd~nc9F&%hfxl+wH1DQyFmyMk=F078h5I@OUn*G%QxaVSlqXV&s zJAQym;!1cmzCcxyfLYN;QL;H?jaQ%O&Qm{Sh-9h>8td{CqSK@vknPk31KbgA!F#>J z*lOHa7}p%}B~IIL^YQgdqhl0jP?qFFR*9MiQnV&;Ybr@P$E+QlrS`&%_z?B6jY7UH ztJ~aaD3I> zD&(9{f}toDtkyT*=S`bnp5l~-zQLp@U^tVq&Ofw$oG{K35MccL9291Ncz`#NWD->F z$Ui~Ps|?4I+ZGSby^LLz6OC~M>|_Kj^bonjql@4v69hg;wdkS1q(_WW^ZGanjgwNU z#LUqoCy;Phun*rkaTclT=5!^PC)bR${gAA`G4fg?;mF0zlgS=y!3B&&9}&r2EwZK~ zNOJBPG+g$wBw5{fR<#^2i?^Q<4cADneyD^n7O^ zAjW21BIqq1)9guA++0bdEyxCw9j~LryV6l9Rsr-+l@G`>x8%b9m8y&?;Y=QMR=a=T zJ~ls^RDCIv~2E;dh#be4GC2knwfYC=11vSP4PF zn(FA6*mM!Np#bY4g}9?h1n2bMH+6#$Y>dFOU~(eZv{p`tQfGocsM*OS67PRZA}^2}8e~5@I@MAAJ{Pb+D6Jw1k#JAq2Men^4`~7}>J-L~oC*bl 
zB5qa3_*HBo3E-IBYn1dci1BF5lyu5{yShYf;|V8x9DeoGwdM(a0CC`6$+gv_6iE-N zZSZ!~{^g(5#Nwvqj5>oEWRJ*02a{2v(uLvR#L7*~4g&?9Kk1CIDdjp)W0n@JB22n~yPB zBRBNi(m4A2fK%1KKw~C&Tu_6SU<^-&1YwE?

E00)2B0;#ujpfI6Bw)q=zqN?u2p zOHxTi@Oi*>e+K^m_7>alb)D=Y_M?km5Qk(4iQ@SH3SVh@>eiJ|+=(Y6r%MYMI$%OK z36k)FGQ6kHxTdfupCIX0Q&B`dSLpUo&N(I#i2;5Z=kG)d>B3Yg1BszlSdP?P{Epw*|>#npB&VgNVUtVZcZ0wS?BN(r=pG?%HeV9isd z9c9LL$%<@Ry^;K=qBjA;lPB*Mw5qjahE^w}KuB|4RzJ20ajFt(WK>RszqG@|aAWBs z>=Jc=(AFlrTS0{RION<*b|V6BMg@G21|JHx$y>t_bWpccZ0MBov9!74*Wh8n$t^>q zLOQRLLYmO01`GJUIKpFFK2R=0DP$-d=}%sk*Sp3*tS(u|*03g?BV(UWws`S7ko2<{E0RI$qJU7Gi? zM+AH{9tjJkzd>U|31)o4&={<>9G7&$(9KJd<$l_J9-SqM4xk#=d9e6+z_2DY0&{Cl z`yf?qQ16bzwzIwe0ESaa$QW!WyRrWOFixSmm~`2`$G3kUxL`uA6c`@36lQ^q%E0PZ z>LOBA9W-z>Y3;-80<+ZLys=&8SZTXlA$2j#@y(Be9N4e&F)F&^lnsr6UyMj6t#e;ChG{34;D3MC>r8c>RIZU*SqLaMnz8XcFZkU zSYrTJ5@Fj%nF|GI<7Ve_ybS0$aMzgDR&UM~6NO#3qERUc5T-nlrjLL2>8qKF1$#3{ zu{N<68VVxL<1^;uxjFv;@=mFcpZcyJ*Y9_p5Vn|E zR&CohZCU<)z?O6Ao=kjIO4L&s6~_!$w?>}J{#LQ3EjI|P)6uVatA=&QS2ZI6(}4n5 zNt#hs6&u9nr|S2e``9RwNuu5mizr=iknVYVRrB-AD!w4i=kz)J7Uc7cD8O*BL=v#_ z+l$)ab4_@*ggmltg~XISF(I@reib})#mtjE-Rt4dA*1S|qF}D!yYShCRL0ED2tDH^ z>ttDPHL^6yl-$=*=?r+fZ`1M8s0!Cj=Xw?s&w7__@nsMaj$IVut5-Hp{_BzyPUVR` zfN9fyIr|1*xH}|pxWNM}$;)DFb}c34cf1wb--l3k2}cv#>B#c573H+8+tPrIQ_XEG zc$E?A9sOSM%Fh`{xF-^oF2#w%)N*E*@%p}XlIb=ZrMj|+LK=5PR5_1~{1r764Fqni zKv#AAJtdG4e0{KXx{FQ>t6tr2|t19Q;z=A zOapRpjxJ1XhBs=;;6iJ8HZ#^qWCV~wZuOi!UnI*<@7)J0bBi~Y6hJS#5vmAKElNBA zJ)6++p@<4vYpvF$>SH&2#-#_^^IDMq0PC2S#M6v>lhYAgkgfN;m!MwI6~DDK==g+( zO7*}v5t`#kpz-*T;*PRLe92pqN4YNrqC%TPFztq{4g+)zGbixgAWWjY(yOtw&8AZ| zd0o8MG3=1m&aT)D{{Y&Eov}jzvLZ-iIl(g$>8zOpSS7M*j`Vk53+R?(ZqC|kQ6ps9 z?l%{l6>w~=-y*>pI%#k%*FnxaO|#v@AzI=Q=-MM#pKSXgM)oBrW3Bi#2p0$ z=`o#?-F9#x#dDjXAu}=JqXjv$bGT;H#e}DJ!|e2&!6G4~XRKNX?kDxl|c_W&5)xjHERB(L#~&qwX>OA@m$+9?-&}kAdpA zB`$_%FjbT8;SH*E9g&`dF6M(5^kngAu8^qQC#s0j2o|h1`Fd2`c?1y$P6IOG6V}3X z3DIDWc~b@imsVPuIKGZ3AD99v#n?pLh%-KSS3+fQSeZr7(CYZ$UPd%T3&)l420h&+`i5_{=?Ol)8tj& zyIsWzc1B#0?x8q+EcvqpakTB1yV`ii2hs`+u7Eb)b_sY31<``u<}1S;24nodtAyA~ zF1zjx{G$dUF-FI2k`0MfNqv`Mx?>1bFr-$&Z`Jaz%~b ze0!4h!JJ@`eCwihw-L!vNRXadzZi34(g)MZA_I;k-XMwfU3zJWvyC>E&*P(G4VQ9j 
z%UP3*lSw}&y!7I#jzIx{;zb_k1Rs|BFyl}&=D)Y}Kvt=4pGKCGuE!PUHoFyn2xCNX z9Q*G$z*DaTkTiYtqdf{RnrP>(hhb~=4dz2WcbQa*t1E%y^kakMsi75K&4%Jo&_8A7lZjbA_4N`Ly zvTuWgr>&B3XY;7nl+tW{Ci%>&7?Bi>Y3xg`ADjA9X4svDQho6$Y5*07pI=+U*w=x4 z)>M9!1V@=NXtZ1R(4hyQ0LE;lfC`i~AvVGpV}dj*uF!9I{-g^7#aKd+>Mru}#2Fs7 zathOX7PL?mt{|(vT{+-s3zVl6OrsU4@+<6FyXSAHr3Gks7 zqbO!UUAXfnc33LSrLH;I%w4>P;(1KF`%?}FxegS7^QwcH0j*9|@Ku#C=_XM_>0d4t z!w?i0+w2BpEro-WC~Ht9lu!~e6E-#7W~v3KsSQ_uR~RD8A1ia?IK08KD_I4`zpyN- zz@P(0-3dID*tnBtpGv`L@I9sn2n!+Hr{f}0Zgey_ky7~!XBE=Mtc|>JivIa-hYDnC z4}>-pBHEM+4_E0+v6@Nlz1oT7c0@5A=viNn-yj)?qx=#%UP1?2)}iVI5YA|LQobdD zpa%A%mmz?t|btKb8 zQ9Bw_wg#-DdPXwOfBa@aBIB&Pll|(ds{w>Hu9+X3yl=;DYCFCnL**6WOf0AElT`iv zQvU!s@S|)ge?itZfXr>e>=U^WTqd6MHJt5b8KIv!v0VC5CV)UMNRJzdZb>DQS)#hI zP9T0-xxH-m!A(}T_uk1mx9__x%f;|uWEm{;0ZTmaVW-yUwVe*`)D?uCp|hGC68SGAELj1(mJR z8N&EO{;`cEpIMG(MF>gA&a)72QTSC(^ zyD`0bAGy*ae!NDy#q4fb$$=jK0AB)O&*;5=;m~eXK0PxD^3kv8gYk;N#CZpV!<*pY zh0ZW^XSDLx%+sJGA#D zgCU`~Q`<|Lh;f09RGBofBP$5{JUV)6+$Ej?l8gZzu}rI%$o(EFDo_6aH%?Wf$x$+S zBB+7oQ{Y$yrujyIPpFQ~Lov>9Yzzi`Fg*dStGH$k)WwKSpTu*6hpbi1^EdAu$LcS`Kz_AB%5ez@VF=FX|13j+yi+#KGS^ z09I3Bdj8+Ka73H8?Plw7kL5VtCO|3WcX4*-f^@tjJ?Q<> zFjuu=QSw%^59*0et2+7lzL&N4OJ=NP{=N-!%YMrkuo z!J)U3;@W72)EU%`88<|AX+S_gf!q_x6w@XB;SyJ`6|MrhDCvT_1mOG{AX>IVY`h?> zM^r18?&MX(ED(@j$dcB$v>o@A_1-pKZFTD&WRToU)Bybc{BP?#cQhH07j=pz`+V991@ehXd7(p=99+b*kL-`EaxJ zvUZSMAj(Cq1GK3&B18_n{E+8UipWxe(I1z+Vp_2|tOqNChBs=+-$7$@%8~fy<7Srt z5=;yeMhXB7Fs)6x5YGxUhyp{1iI&Owi_QfQUuEEKREZ)|r1eu7oq>D)w&3G?z$*ws zgpInQUI{_02@G)VEWchGQ{j=b0$eRT#92xp-%mms&^6Vn)D6gXH6sZ#Tb)aokGo`* zISLU+n4F!T;k%xGa8y2kJ?{5KYR*_*xr!42@nRjZyc9kPjsjLkiAfl%zMygm{$5WD2aJiugZPP)(kO{hNrY$|UG#1oKyhv* zPg!iN*g2t)IaEl;9~5dRT#DJlAIo2VD-VOl24=gEAH|EkX;E<_aKeJp%EoQ}_x#)G zJy^xph{!+@araYTtSCP*sxhf#CL=_{XOeD7)e>?6ADMR}9w)*z&^kM+GreVl%?zhY z<+$k+fZ*P6@8u98DP<_rUP#~D3vQFP)Kdrf+2hsyIDZ}d<|EOR)N4L5)YNob^g7gc zKe~D_WPTG=?HRr zkxD)#6l6KKUjPX!-m>`2Vwa4SS_Uu$!;Fa%p;q#zdM#&y zWbxI4jU1xg!~Ne{8w$blqp3wBXh$rcd3=nydE=n7zY!`%KH;by(G{VjAW9@77VSv& 
zJq!H2Q1@gfe(gx6bgq+hlC1X6GL+vOz!GRkF9nGQY$sL1Fr~=klu}#}JSPUx01Eur zI1YbG4GwqFJ^D4F{(buzN|)Hs71$c4m`~lf;&P{_Y)4l&&LW-t4s>#DcH#`u@05j~ zo|G9znh}1um_#8bB3rB^toopY8sp1NH;EczNO)R#7g^VAj5`vP(RZ+lob~g*WZO>o zCrz@L!V(HHA}>6JR}PkIW1vrCZ*YD;DmEwjf`MlrxJXSw$_8q?bOcM$PmKk6``zZ#>&_26Kd98xz?L`iDSAmf3uX1lWyaf|3 z%s!yhI2tqkExNN}ho4#=w4dFnsrVF1FHndkQ?z4C+(0U8SOA82bKjgk2PYWR;quvj zqjL>e7?0{Yq5&^I1X*g&Zt%iVdicN`bDux^INDdhmk7;{ybD(;D|QD`E~3O5YpnzLFvCUa!7=YV+}tLIK1ER?*z2n zWXO0fS<4tK4lk55gO`>W6r+DIJK(k8-38XVq~R8}7H@r%k1lM#WtOu1S5}G{VdMta z4bfkNl?r&HjHk(1HnuTa`C0krV47x;sfLW*>7N(Fif&23;-dqcrLN~s(hxd6fDB?e z>ZqDW8l4`BqG?rdFeinml>1-g%BAP7#P9`)dIe$V@J*&V{{Rcc5l;`Wa}lOJ*CXCp$%x?;Zy>}M~>u^@+1RL*xLZXkt|i_Y!F)CSnrGrD;l%?$G?bD z03fc_1FsORaS$SW^s_=SY3KMjupcW^E)QR{BVuZBk(;f9;M{qH@}nAIIA>1Q+FY7C zB?rA2;AgcsBG<7#flx}SyxFhRNjsNj?RXHXa@s06?BT#=atc_bPmy z?`B0Cvc@MC-E#w%BxKS*HjJ?)P#F$GeW50zYy3~@@L6{M0AZgzIuz(}Vnm4zjXi3L zRM%_$15-7j>F<=iHv%a*pU4Gs8=(lBa#Bwz2tHL+{~Hnm~)bkZ+Yw1u{XFv$-7K{jAg3SQxjy%k}v zCye&63`v6d5^k)|TtG_CatMGoWjUXcK$1{VQly4SK1`AZGZagb3A+eBNJNU9m>zab zQ**6cMwijgR)*`6&G|QvKMfV=0^FEl!`_HPaH_6A?Pd5L8j_*#gNgPH9)jV>EmT_o z?OfHU-gO2ipi`u|j>2Nz)2^bGnzFS~F7!j6;C21cr3hshuDgs&pyg}Tf zW6gLH_AIs?*AG|Wx^r5va#P<6qKI{tIStrd-*!X)064CoMzyXX2s&VHxWm zG<$W82{m;pQ`gSQFApzax}t36Izp5@;WiW7b}R~FQ~=}V_Ko24(pOQs_-BN|6dD|( z6(3pc;R;G-b6;<#c}qR&Esj^=x@Et@U>dY2L>_ACQo(iaYE*rC(SgyL1gB<%@q6+M zT_oNsFB0c%kD$;|0O+W86q3iUvhRAc`WP>3tYXGeL$|N@Ev7Ju`>p&+1>` zrgK}>q$u+&f^7W{y-77AZ|4JI;+lY97DpX;gtHUr80$1b&*OJ8e>KiUxUjQ5)z2)4Y{ z1jWlBA!z1Oud;;<#*7Hjw)vyI;0t-rN}wrE=w}a;Q4x2)*uIG2_w1Ut~iJGg1ihWta@le<=sGXw87gVO+%w5#lp1@d#0L6~eoEhJ3+oAhc1Q@xMJ!sy-H9@M9 zE=9g6yVLoInV`Q|_g#m8ACW8$4_G?s`-p|8<0%PicE%v(zmY%>&p|SI(SEyX9r-cO zOlW30Ss^N*+ggWf#v6HI>lUZ2@1`fYrP≈6BPBMtmjV4_1Z8BL4ue`BZmvX~0Ei zP#Pl0Gsk`+@sx(9V-~iN3oAA4fm4AJCbr;S27#=iGeIX2TGljr7@z7cY{)qlRu821 z8##67^dp2J9)%YpqiTQ<(j(h$6-(FJ{GXpd&CVqXNd&0BGBM(CKKJRP1a1(_hg zKY12)8?$Am*3i3h(nSnaKNjvRNQ@XyEWWZ$G7)xAFK8HEdCPm;m!%FdA zmO`o9mB0^+Mn9SeaB18n2VAnKB3JamD?la-=CtISv(=#mz~e)4y9}|$TzN-_fn8)> 
zonDm(Nd Date: Mon, 16 May 2016 22:19:20 +0200 Subject: [PATCH 29/58] Replacing the balance parameter with `content-weight` and new parameters based on suggestion called `previous-weight` and `noise-weight`, each can be specified as arrays on the command-line. The blending is done after matching the patches. --- doodle.py | 42 +++++++++++++++++++++++++----------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/doodle.py b/doodle.py index 08de7c1..fab77d6 100755 --- a/doodle.py +++ b/doodle.py @@ -36,7 +36,9 @@ add_arg('--style', default=None, type=str, help='Texture image path to extract patches from.') add_arg('--layers', default=['5_1','4_1','3_1'], nargs='+', type=str, help='The layers/scales to process.') add_arg('--variety', default=[0.2, 0.1, 0.0], nargs='+', type=float, help='Bias selecting diverse patches') -add_arg('--balance', default=[1.0], nargs='+', type=float, help='Weight of style relative to content.') +add_arg('--content-weight', default=[0.0], nargs='+', type=float, help='Weight of input content features each layer.') +add_arg('--previous-weight', default=[0.2], nargs='+', type=float, help='Weight of previous layer features.') +add_arg('--noise-weight', default=[0.0], nargs='+', type=float, help='Weight of noise added into features.') add_arg('--iterations', default=[6,4,2], nargs='+', type=int, help='Number of iterations to run in each phase.') add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') @@ -366,7 +368,7 @@ def prepare_content(self, scale=1.0): " - Try creating the file `{}_sem.png` with your annotations.".format(basename)) if self.style_map.max() >= 0.0 and content_map_original is None: - basename, _ = 'poo', 'face' # os.path.splitext(target) + basename, _ = os.path.splitext(args.content or args.output) error("Expecting a semantic map for the input content image 
too.", " - Try creating the file `{}_sem.png` with your annotations.".format(basename)) @@ -470,6 +472,9 @@ def iterate_batches(self, *arrays, batch_size): yield excerpt, [a[excerpt] for a in arrays] def evaluate_slices(self, f, l, v): + self.normalize_components(l, f, self.compute_norms(np, l, f)) + self.matcher_tensors[l].set_value(f) + layer, data = self.model.network['nn'+l], self.style_data[l] history = data[-1] @@ -494,34 +499,37 @@ def evaluate(self, Xn): """Feed-forward evaluation of the output based on current image. Can be called multiple times. """ frame = 0 - parameters = zip(args.layers, extend(args.iterations), extend(args.balance), extend(args.variety)) + layer_params = [args.iterations, args.content_weight, args.previous_weight, args.noise_weight, args.variety] + parameters = zip(args.layers, *[extend(a) for a in layer_params]) # Iterate through each of the style layers one by one, computing best matches. - desired_feature = np.copy(self.content_features[0]) + previous_feature = self.content_features[0] self.render(frame, args.layers[0], self.content_features[0]) - for parameter, current_feature, compute in zip(parameters, self.content_features, self.compute_output): - l, iterations, balance, variety = parameter + for parameter, content_feature, compute in zip(parameters, self.content_features, self.compute_output): + l, iterations, content_weight, previous_weight, noise_weight, variety = parameter + desired_feature, patches = np.copy(previous_feature), self.style_data[l][0] + assert previous_weight + content_weight < 1.0, "Previous and content weight should total below 1.0!" 
- print('\n{}Phase {}{}: variety {}, balance {}, iterations {}.{}'\ - .format(ansi.CYAN_B, l, ansi.CYAN, variety, balance, iterations, ansi.ENDC)) + weights = 'c={:0.1f} p={:0.1f} n={:0.1f}'.format(content_weight, previous_weight, noise_weight) + print('\n{}Phase {}{}: variety {}, weights {}, iterations {}.{}'\ + .format(ansi.CYAN_B, l, ansi.CYAN, variety, weights, iterations, ansi.ENDC)) channels, iter_time = self.model.channels[l], time.time() for j in range(iterations): - self.normalize_components(l, desired_feature, self.compute_norms(np, l, desired_feature)) - self.matcher_tensors[l].set_value(desired_feature) - # Compute best matching patches this style layer, going through all slices. best_idx, best_val = self.evaluate_slices(desired_feature, l, variety) - - patches = self.style_data[l][0] current_best = patches[best_idx].astype(np.float32) better_patches = current_best.transpose((0, 2, 3, 1)) better_shape = desired_feature.shape[2:] + (desired_feature.shape[1],) better_features = reconstruct_from_patches_2d(better_patches, better_shape) - desired_feature = better_features.astype(np.float32).transpose((2, 0, 1))[np.newaxis] - desired_feature = (1.0 - balance) * current_feature + (0.0 + balance) * desired_feature + better_features = better_features.astype(np.float32).transpose((2, 0, 1))[np.newaxis] + + # The new set of features is a blend of matched patches, input image, and previous layer. 
+ desired_feature = (1.0 - previous_weight - content_weight) * better_features \ + + content_weight * content_feature + previous_weight * previous_feature \ + + noise_weight * np.random.normal(0.0, 1.0, size=previous_feature.shape).astype(np.float32) used = 99.9 * len(set(best_idx)) / best_idx.shape[0] dups = 99.9 * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] @@ -532,9 +540,9 @@ def evaluate(self, Xn): self.render(frame, l, desired_feature) iter_time = time.time() - desired_feature = compute(desired_feature[:,:channels], self.content_map) + previous_feature = compute(desired_feature[:,:channels], self.content_map) - return desired_feature + return previous_feature def render(self, frame, layer, features): """Decode features at a specific layer and save the result to disk for visualization. (Takes 50% more time.) From 6358992789a38425d2f597d443ed346fd9c99e47 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Thu, 19 May 2016 22:58:36 +0200 Subject: [PATCH 30/58] Minor tweaks to filenames, code layout, adding prototype content renormalization but commented out. 
--- doodle.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/doodle.py b/doodle.py index fab77d6..bd6f375 100755 --- a/doodle.py +++ b/doodle.py @@ -36,8 +36,8 @@ add_arg('--style', default=None, type=str, help='Texture image path to extract patches from.') add_arg('--layers', default=['5_1','4_1','3_1'], nargs='+', type=str, help='The layers/scales to process.') add_arg('--variety', default=[0.2, 0.1, 0.0], nargs='+', type=float, help='Bias selecting diverse patches') +add_arg('--previous-weight', default=[0.0, 0.2], nargs='+', type=float, help='Weight of previous layer features.') add_arg('--content-weight', default=[0.0], nargs='+', type=float, help='Weight of input content features each layer.') -add_arg('--previous-weight', default=[0.2], nargs='+', type=float, help='Weight of previous layer features.') add_arg('--noise-weight', default=[0.0], nargs='+', type=float, help='Weight of noise added into features.') add_arg('--iterations', default=[6,4,2], nargs='+', type=int, help='Number of iterations to run in each phase.') add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') @@ -516,6 +516,13 @@ def evaluate(self, Xn): .format(ansi.CYAN_B, l, ansi.CYAN, variety, weights, iterations, ansi.ENDC)) channels, iter_time = self.model.channels[l], time.time() + """" + # Remap the content features onto the style range, helps with blending primarily as patch-matching is normalized. + smin, smax = patches.min(axis=(0,2,3), keepdims=True), patches.max(axis=(0,2,3), keepdims=True) + cmin, cmax = content_feature.min(axis=(0,2,3), keepdims=True), content_feature.max(axis=(0,2,3), keepdims=True) + content_feature = (content_feature - cmin) / (cmax - cmin + 1E-9) * (smax - smin + 1E-9) + smin + """ + for j in range(iterations): # Compute best matching patches this style layer, going through all slices. 
best_idx, best_val = self.evaluate_slices(desired_feature, l, variety) @@ -533,10 +540,9 @@ def evaluate(self, Xn): used = 99.9 * len(set(best_idx)) / best_idx.shape[0] dups = 99.9 * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] - err = best_val.mean() + err, frame = best_val.mean(), frame + 1 print('{:>3} {}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'.format(frame, ansi.BOLD, ansi.ENDC, used, dups, ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) - frame += 1 self.render(frame, l, desired_feature) iter_time = time.time() @@ -553,7 +559,7 @@ def render(self, frame, layer, features): output = self.model.finalize_image(features.reshape(self.content_img.shape[1:]), self.content_shape) filename = os.path.splitext(os.path.basename(args.output))[0] - scipy.misc.toimage(output, cmin=0, cmax=255).save('frames/{}-{:03d}-L{}.png'.format(filename, frame, layer[0])) + scipy.misc.toimage(output, cmin=0, cmax=255).save('frames/{}-{:03d}.png'.format(filename, frame)) def run(self): """The main entry point for the application, runs through multiple phases at increasing resolutions. From 0d8cf11f72651a6abe3166986f217728692dff39 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sun, 22 May 2016 13:21:48 +0200 Subject: [PATCH 31/58] Reworking layerwise code to allow for inter-layer iterations. 
--- doodle.py | 276 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 144 insertions(+), 132 deletions(-) diff --git a/doodle.py b/doodle.py index bd6f375..688d0fb 100755 --- a/doodle.py +++ b/doodle.py @@ -39,7 +39,7 @@ add_arg('--previous-weight', default=[0.0, 0.2], nargs='+', type=float, help='Weight of previous layer features.') add_arg('--content-weight', default=[0.0], nargs='+', type=float, help='Weight of input content features each layer.') add_arg('--noise-weight', default=[0.0], nargs='+', type=float, help='Weight of noise added into features.') -add_arg('--iterations', default=[6,4,2], nargs='+', type=int, help='Number of iterations to run in each phase.') +add_arg('--iterations', default=1, type=int, help='Number of iterations to run in each phase.') add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. 
style features.') @@ -103,7 +103,7 @@ def snap(value, grid=2**(int(args.layers[0][0])-1)): return int(grid * math.floo from lasagne.layers import Conv2DLayer as ConvLayer, Deconv2DLayer as DeconvLayer, Pool2DLayer as PoolLayer from lasagne.layers import InputLayer, ConcatLayer -print('{} - Using the device `{}` for heavy computation.{}'.format(ansi.CYAN, theano.config.device, ansi.ENDC)) +print('{} - Using the device `{}` for tensor computation.{}'.format(ansi.CYAN, theano.config.device, ansi.ENDC)) #---------------------------------------------------------------------------------------------------------------------- @@ -127,83 +127,75 @@ def setup_model(self, previous=None): for j in range(6): net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad') - def DecvLayer(copy, previous, channels, **params): - # Dynamically injects intermediate "pitstop" output layers in the decoder based on what the user - # specified as layers. It's rather inelegant... Needs a rework! - if copy in args.layers: - if len(self.tensor_latent) > 0: - l = self.tensor_latent[-1][0] - if args.semantic_weight > 0.0: - net['out'+l] = ConcatLayer([previous, net['map%i'%(int(l[0])-1)]]) - else: - net['out'+l] = previous - - self.tensor_latent.append((copy, T.tensor4())) - net['lat'+copy] = InputLayer((1, previous.num_filters, None, None), var=self.tensor_latent[-1][1]) - previous = net['lat'+copy] - - dup = net['enc'+copy] - return DeconvLayer(previous, channels, dup.filter_size, stride=dup.stride, crop=dup.pad, - nonlinearity=params.get('nonlinearity', lasagne.nonlinearities.elu)) + self.tensor_latent = [] + for l in args.layers: + self.tensor_latent.append((l, T.tensor4())) + # TODO: Move equation to calculate unit numbers into a common function, call from below too. 
+ net['lat'+l] = InputLayer((None, 32 * 2**(int(l[0])-1), None, None), var=self.tensor_latent[-1][1]) + + def EncdLayer(previous, channels, filter_size, **params): + incoming = net['lat'+previous] if previous in args.layers else net['enc'+previous] + return ConvLayer(incoming, channels, filter_size, **params) custom = {'nonlinearity': lasagne.nonlinearities.elu} # Encoder part of the neural network, takes an input image and turns it into abstract patterns. - net['img'] = previous or InputLayer((1, 3, None, None)) - net['enc1_1'] = ConvLayer(net['img'], 32, 3, pad=1, **custom) - net['enc1_2'] = ConvLayer(net['enc1_1'], 32, 3, pad=1, **custom) - net['enc2_1'] = ConvLayer(net['enc1_2'], 64, 2, pad=0, stride=(2,2), **custom) - net['enc2_2'] = ConvLayer(net['enc2_1'], 64, 3, pad=1, **custom) - net['enc3_1'] = ConvLayer(net['enc2_2'], 128, 2, pad=0, stride=(2,2), **custom) - net['enc3_2'] = ConvLayer(net['enc3_1'], 128, 3, pad=1, **custom) - net['enc3_3'] = ConvLayer(net['enc3_2'], 128, 3, pad=1, **custom) - net['enc3_4'] = ConvLayer(net['enc3_3'], 128, 3, pad=1, **custom) - net['enc4_1'] = ConvLayer(net['enc3_4'], 256, 2, pad=0, stride=(2,2), **custom) - net['enc4_2'] = ConvLayer(net['enc4_1'], 256, 3, pad=1, **custom) - net['enc4_3'] = ConvLayer(net['enc4_2'], 256, 3, pad=1, **custom) - net['enc4_4'] = ConvLayer(net['enc4_3'], 256, 3, pad=1, **custom) - net['enc5_1'] = ConvLayer(net['enc4_4'], 512, 2, pad=0, stride=(2,2), **custom) - net['enc5_2'] = ConvLayer(net['enc5_1'], 512, 3, pad=1, **custom) - net['enc5_3'] = ConvLayer(net['enc5_2'], 512, 3, pad=1, **custom) - net['enc5_4'] = ConvLayer(net['enc5_3'], 512, 3, pad=1, **custom) - net['enc6_1'] = ConvLayer(net['enc5_4'], 768, 2, pad=0, stride=(2,2), **custom) + net['img'] = previous or InputLayer((None, 3, None, None)) + net['enc0_0'], net['lat0_0'] = net['img'], net['img'] + net['enc1_1'] = EncdLayer('0_0', 32, 3, pad=1, **custom) + net['enc1_2'] = EncdLayer('1_1', 32, 3, pad=1, **custom) + net['enc2_1'] = 
EncdLayer('1_2', 64, 2, pad=0, stride=(2,2), **custom) + net['enc2_2'] = EncdLayer('2_1', 64, 3, pad=1, **custom) + net['enc3_1'] = EncdLayer('2_2', 128, 2, pad=0, stride=(2,2), **custom) + net['enc3_2'] = EncdLayer('3_1', 128, 3, pad=1, **custom) + net['enc3_3'] = EncdLayer('3_2', 128, 3, pad=1, **custom) + net['enc3_4'] = EncdLayer('3_3', 128, 3, pad=1, **custom) + net['enc4_1'] = EncdLayer('3_4', 256, 2, pad=0, stride=(2,2), **custom) + net['enc4_2'] = EncdLayer('4_1', 256, 3, pad=1, **custom) + net['enc4_3'] = EncdLayer('4_2', 256, 3, pad=1, **custom) + net['enc4_4'] = EncdLayer('4_3', 256, 3, pad=1, **custom) + net['enc5_1'] = EncdLayer('4_4', 512, 2, pad=0, stride=(2,2), **custom) + net['enc5_2'] = EncdLayer('5_1', 512, 3, pad=1, **custom) + net['enc5_3'] = EncdLayer('5_2', 512, 3, pad=1, **custom) + net['enc5_4'] = EncdLayer('5_3', 512, 3, pad=1, **custom) + net['enc6_1'] = EncdLayer('5_4', 768, 2, pad=0, stride=(2,2), **custom) + + def DecdLayer(copy, previous, channels, **params): + # Dynamically injects intermediate "pitstop" output layers in the decoder based on what the user + # specified as layers. It's rather inelegant... Needs a rework! + dup, incoming = net['enc'+copy], net['lat'+copy] if copy in args.layers else net[previous] + return DeconvLayer(incoming, channels, dup.filter_size, stride=dup.stride, crop=dup.pad, + nonlinearity=params.get('nonlinearity', lasagne.nonlinearities.elu)) # Decoder part of the neural network, takes abstract patterns and converts them into an image! 
- self.tensor_latent = [] - net['dec6_1'] = DecvLayer('6_1', net['enc6_1'], 512) - net['dec5_4'] = DecvLayer('5_4', net['dec6_1'], 512) - net['dec5_3'] = DecvLayer('5_3', net['dec5_4'], 512) - net['dec5_2'] = DecvLayer('5_2', net['dec5_3'], 512) - net['dec5_1'] = DecvLayer('5_1', net['dec5_2'], 256) - net['dec4_4'] = DecvLayer('4_4', net['dec5_1'], 256) - net['dec4_3'] = DecvLayer('4_3', net['dec4_4'], 256) - net['dec4_2'] = DecvLayer('4_2', net['dec4_3'], 256) - net['dec4_1'] = DecvLayer('4_1', net['dec4_2'], 128) - net['dec3_4'] = DecvLayer('3_4', net['dec4_1'], 128) - net['dec3_3'] = DecvLayer('3_3', net['dec3_4'], 128) - net['dec3_2'] = DecvLayer('3_2', net['dec3_3'], 128) - net['dec3_1'] = DecvLayer('3_1', net['dec3_2'], 64) - net['dec2_2'] = DecvLayer('2_2', net['dec3_1'], 64) - net['dec2_1'] = DecvLayer('2_1', net['dec2_2'], 32) - net['dec1_2'] = DecvLayer('1_2', net['dec2_1'], 32) - net['dec1_1'] = DecvLayer('1_1', net['dec1_2'], 3, nonlinearity=lasagne.nonlinearities.tanh) + net['dec6_1'] = DecdLayer('6_1', 'enc6_1', 512) + net['dec5_4'] = DecdLayer('5_4', 'dec6_1', 512) + net['dec5_3'] = DecdLayer('5_3', 'dec5_4', 512) + net['dec5_2'] = DecdLayer('5_2', 'dec5_3', 512) + net['dec5_1'] = DecdLayer('5_1', 'dec5_2', 256) + net['dec4_4'] = DecdLayer('4_4', 'dec5_1', 256) + net['dec4_3'] = DecdLayer('4_3', 'dec4_4', 256) + net['dec4_2'] = DecdLayer('4_2', 'dec4_3', 256) + net['dec4_1'] = DecdLayer('4_1', 'dec4_2', 128) + net['dec3_4'] = DecdLayer('3_4', 'dec4_1', 128) + net['dec3_3'] = DecdLayer('3_3', 'dec3_4', 128) + net['dec3_2'] = DecdLayer('3_2', 'dec3_3', 128) + net['dec3_1'] = DecdLayer('3_1', 'dec3_2', 64) + net['dec2_2'] = DecdLayer('2_2', 'dec3_1', 64) + net['dec2_1'] = DecdLayer('2_1', 'dec2_2', 32) + net['dec1_2'] = DecdLayer('1_2', 'dec2_1', 32) + net['dec1_1'] = DecdLayer('1_1', 'dec1_2', 3, nonlinearity=lasagne.nonlinearities.tanh) net['dec0_0'] = lasagne.layers.ScaleLayer(net['dec1_1']) + net['out'] = 
lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) - l = self.tensor_latent[-1][0] - net['out'+l] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) + def ConcatenateLayer(incoming, layer): + return ConcatLayer([incoming, net['map%i'%(int(layer[0])-1)]]) if args.semantic_weight > 0.0 else incoming # Auxiliary network for the semantic layers, and the nearest neighbors calculations. - for j, i in itertools.product(range(6), range(4)): - suffix = '%i_%i' % (j+1, i+1) - if 'enc'+suffix not in net: continue - - self.channels[suffix] = net['enc'+suffix].num_filters - if args.semantic_weight > 0.0: - net['sem'+suffix] = ConcatLayer([net['enc'+suffix], net['map%i'%(j+1)]]) - else: - net['sem'+suffix] = net['enc'+suffix] - - net['dup'+suffix] = InputLayer(net['sem'+suffix].output_shape) - net['nn'+suffix] = ConvLayer(net['dup'+suffix], 1, 3, b=None, pad=0, flip_filters=False) + for layer, upper, lower in zip(args.layers, [None] + args.layers[:-1], args.layers[1:] + [None]): + self.channels[layer] = net['enc'+layer].num_filters + net['sem'+layer] = ConcatenateLayer(net['enc'+layer], layer) + net['dup'+layer] = InputLayer(net['enc'+layer].output_shape) + net['nn'+layer] = ConvLayer(ConcatenateLayer(net['dup'+layer], layer), 1, 3, b=None, pad=0, flip_filters=False) self.network = net @@ -343,14 +335,19 @@ def prepare_style(self, scale=1.0): self.style_img = self.model.prepare_image(style_img_original) self.style_map = style_map_original.transpose((2, 0, 1))[np.newaxis].astype(np.float32) - # Compile a function to run on the GPU to extract patches for all layers at once. 
- layer_patches = self.do_extract_patches(args.layers, self.model.get_outputs('sem', args.layers), extend(args.shapes)) - extractor = self.compile([self.model.tensor_img, self.model.tensor_map], layer_patches) - result = extractor(self.style_img, self.style_map) + input_tensors = reversed([('0_0', self.model.tensor_img)] + self.model.tensor_latent[1:]) + self.encoders = [] + for layer, (input, tensor_latent), shape in zip(args.layers, input_tensors, extend(args.shapes)): + output = lasagne.layers.get_output(self.model.network['sem'+layer], + {self.model.network['lat'+input]: tensor_latent, + self.model.network['map']: self.model.tensor_map}) + fn = self.compile([tensor_latent, self.model.tensor_map], [output] + self.do_extract_patches([layer], [output], [shape])) + self.encoders.append(fn) # Store all the style patches layer by layer, resized to match slice size and cast to 16-bit for size. - self.style_data = {} - for layer, *data in zip(args.layers, result[0::3], result[1::3], result[2::3]): + self.style_data, feature = {}, self.style_img + for layer, encoder in reversed(list(zip(args.layers, self.encoders))): + feature, *data = encoder(feature, self.style_map) patches, l = data[0], self.model.network['nn'+layer] l.num_filters = patches.shape[0] // args.slices self.style_data[layer] = [d[:l.num_filters*args.slices].astype(np.float16) for d in data]\ @@ -396,13 +393,12 @@ def prepare_content(self, scale=1.0): self.content_map = content_map_original.transpose((2, 0, 1))[np.newaxis].astype(np.float32) self.content_shape = content_img_original.shape - # Feed-forward calculation only, returns the result of the convolution post-activation - self.compute_features = self.compile([self.model.tensor_img, self.model.tensor_map], - self.model.get_outputs('sem', args.layers)) - - self.content_features = self.compute_features(self.content_img, self.content_map) - for layer, current in zip(args.layers, self.content_features): - print(' - Layer {} as {} array in 
{:,}kb.'.format(layer, current.shape[1:], current.size//1000)) + # Feed-forward calculation only, returns the result of the convolution post-activation + self.content_features, feature = [], self.content_img + for layer, encoder in reversed(list(zip(args.layers, self.encoders))): + feature, *_ = encoder(feature, self.content_map) + self.content_features.append(feature) + print(' - Layer {} as {} array in {:,}kb.'.format(layer, feature.shape[1:], feature.size//1000)) def prepare_generation(self): """Layerwise synthesis images requires two sets of Theano functions to be compiled. @@ -416,13 +412,23 @@ def prepare_generation(self): self.compute_matches = {l: self.compile([self.matcher_history[l]], self.do_match_patches(l)) for l in args.layers} # Decoding intermediate features into more specialized features and all the way to the output image. - self.compute_output = [] - for layer, (_, tensor_latent) in zip(args.layers, self.model.tensor_latent): - output = lasagne.layers.get_output(self.model.network['out'+layer], - {self.model.network['lat'+layer]: tensor_latent, - self.model.network['map']: self.model.tensor_map}) - fn = self.compile([tensor_latent, self.model.tensor_map], output) - self.compute_output.append(fn) + input_tensors = reversed([('0_0', self.model.tensor_img)] + self.model.tensor_latent[1:]) + self.encoders = [] + for layer, (input, tensor_latent) in zip(args.layers, input_tensors): + layer = lasagne.layers.get_output(self.model.network['enc'+layer], + {self.model.network['lat'+input]: tensor_latent, + self.model.network['map']: self.model.tensor_map}) + fn = self.compile([tensor_latent, self.model.tensor_map], layer) + self.encoders.append(fn) + + self.decoders, output_layers = [], (['dec'+l for l in args.layers[1:]] + ['out']) + for layer, (_, tensor_latent), output in zip(args.layers, self.model.tensor_latent, output_layers): + output = output.replace('_1', '_2') + layer = lasagne.layers.get_output(self.model.network[output], + 
{self.model.network['lat'+layer]: tensor_latent, + self.model.network['map']: self.model.tensor_map}) + fn = self.compile([tensor_latent, self.model.tensor_map], layer) + self.decoders.append(fn) #------------------------------------------------------------------------------------------------------------------ @@ -471,7 +477,7 @@ def iterate_batches(self, *arrays, batch_size): excerpt = indices[index:index + batch_size] yield excerpt, [a[excerpt] for a in arrays] - def evaluate_slices(self, f, l, v): + def evaluate_slices(self, l, f, v): self.normalize_components(l, f, self.compute_norms(np, l, f)) self.matcher_tensors[l].set_value(f) @@ -495,66 +501,72 @@ def evaluate_slices(self, f, l, v): return best_idx, best_val + def evaluate_feature(self, layer, feature, variety=0.0): + """Compute best matching patches for this layer, then merge patches into a single feature array of same size. + """ + return feature # TODO + + patches = self.style_data[layer][0] + best_idx, best_val = self.evaluate_slices(layer, feature, variety) + better_patches = patches[best_idx].astype(np.float32).transpose((0, 2, 3, 1)) + better_shape = feature.shape[2:] + (feature.shape[1],) + better_feature = reconstruct_from_patches_2d(better_patches, better_shape) + return better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] + + def evaluate_merge(self, features): + """ + # The new set of features is a blend of matched patches, input image, and previous layer. + desired_feature = (1.0 - previous_weight - content_weight) * better_features \ + + content_weight * content_feature + previous_weight * previous_feature \ + + noise_weight * np.random.normal(0.0, 1.0, size=previous_feature.shape).astype(np.float32) + """ + return features + def evaluate(self, Xn): """Feed-forward evaluation of the output based on current image. Can be called multiple times. 
""" frame = 0 layer_params = [args.iterations, args.content_weight, args.previous_weight, args.noise_weight, args.variety] - parameters = zip(args.layers, *[extend(a) for a in layer_params]) - - # Iterate through each of the style layers one by one, computing best matches. - previous_feature = self.content_features[0] - self.render(frame, args.layers[0], self.content_features[0]) - - for parameter, content_feature, compute in zip(parameters, self.content_features, self.compute_output): - l, iterations, content_weight, previous_weight, noise_weight, variety = parameter - desired_feature, patches = np.copy(previous_feature), self.style_data[l][0] - assert previous_weight + content_weight < 1.0, "Previous and content weight should total below 1.0!" + # parameters = zip(args.layers, *[extend(a) for a in layer_params]) + """ weights = 'c={:0.1f} p={:0.1f} n={:0.1f}'.format(content_weight, previous_weight, noise_weight) print('\n{}Phase {}{}: variety {}, weights {}, iterations {}.{}'\ - .format(ansi.CYAN_B, l, ansi.CYAN, variety, weights, iterations, ansi.ENDC)) + .format(ansi.CYAN_B, l, ansi.CYAN, variety, weights, iterations, ansi.ENDC)) channels, iter_time = self.model.channels[l], time.time() + """ - """" - # Remap the content features onto the style range, helps with blending primarily as patch-matching is normalized. - smin, smax = patches.min(axis=(0,2,3), keepdims=True), patches.max(axis=(0,2,3), keepdims=True) - cmin, cmax = content_feature.min(axis=(0,2,3), keepdims=True), content_feature.max(axis=(0,2,3), keepdims=True) - content_feature = (content_feature - cmin) / (cmax - cmin + 1E-9) * (smax - smin + 1E-9) + smin - """ - - for j in range(iterations): - # Compute best matching patches this style layer, going through all slices. 
- best_idx, best_val = self.evaluate_slices(desired_feature, l, variety) - current_best = patches[best_idx].astype(np.float32) - - better_patches = current_best.transpose((0, 2, 3, 1)) - better_shape = desired_feature.shape[2:] + (desired_feature.shape[1],) - better_features = reconstruct_from_patches_2d(better_patches, better_shape) - better_features = better_features.astype(np.float32).transpose((2, 0, 1))[np.newaxis] + current_features = [np.copy(f) for f in self.content_features] + self.render(frame, args.layers[0], current_features[0]) - # The new set of features is a blend of matched patches, input image, and previous layer. - desired_feature = (1.0 - previous_weight - content_weight) * better_features \ - + content_weight * content_feature + previous_weight * previous_feature \ - + noise_weight * np.random.normal(0.0, 1.0, size=previous_feature.shape).astype(np.float32) + for j in range(args.iterations): + desired_features = [self.evaluate_feature(l, f) for l, f in zip(args.layers, current_features)] + current_features = self.evaluate_merge(desired_features) - used = 99.9 * len(set(best_idx)) / best_idx.shape[0] - dups = 99.9 * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] - err, frame = best_val.mean(), frame + 1 - print('{:>3} {}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'.format(frame, ansi.BOLD, ansi.ENDC, used, dups, ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) + """ + for parameter, content_feature, compute in zip(parameters, self.content_features, self.decoders): + l, iterations, content_weight, previous_weight, noise_weight, variety = parameter + desired_feature, patches = np.copy(previous_feature), self.style_data[l][0] + assert previous_weight + content_weight < 1.0, "Previous and content weight should total below 1.0!" 
+ """ - self.render(frame, l, desired_feature) - iter_time = time.time() + return self.decoders[-1](current_features[0], self.content_map) - previous_feature = compute(desired_feature[:,:channels], self.content_map) + """" + used = 99.9 * len(set(best_idx)) / best_idx.shape[0] + dups = 99.9 * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] + err, frame = best_val.mean(), frame + 1 + print('{:>3} {}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'.format(frame, ansi.BOLD, ansi.ENDC, used, dups, ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) - return previous_feature + self.render(frame, l, desired_feature) + iter_time = time.time() + """ def render(self, frame, layer, features): """Decode features at a specific layer and save the result to disk for visualization. (Takes 50% more time.) """ if not args.frames: return - for l, compute in list(zip(args.layers, self.compute_output))[args.layers.index(layer):]: + for l, compute in list(zip(args.layers, self.decoders))[args.layers.index(layer):]: features = compute(features[:,:self.model.channels[l]], self.content_map) output = self.model.finalize_image(features.reshape(self.content_img.shape[1:]), self.content_shape) @@ -565,7 +577,7 @@ def run(self): """The main entry point for the application, runs through multiple phases at increasing resolutions. """ - self.model.setup(layers=['sem'+l for l in args.layers] + ['out'+l for l in args.layers]) + self.model.setup(layers=['enc'+l for l in args.layers] + ['sem'+l for l in args.layers] + ['dec'+l for l in args.layers]) self.prepare_style() self.prepare_content() self.prepare_generation() From 833889f6c5d04f01fda177a4a68879cabe8c0b8f Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sun, 22 May 2016 14:26:36 +0200 Subject: [PATCH 32/58] Separate processing of layers. 
--- doodle.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/doodle.py b/doodle.py index 688d0fb..de7a269 100755 --- a/doodle.py +++ b/doodle.py @@ -397,7 +397,7 @@ def prepare_content(self, scale=1.0): self.content_features, feature = [], self.content_img for layer, encoder in reversed(list(zip(args.layers, self.encoders))): feature, *_ = encoder(feature, self.content_map) - self.content_features.append(feature) + self.content_features.insert(0, feature) print(' - Layer {} as {} array in {:,}kb.'.format(layer, feature.shape[1:], feature.size//1000)) def prepare_generation(self): @@ -504,8 +504,6 @@ def evaluate_slices(self, l, f, v): def evaluate_feature(self, layer, feature, variety=0.0): """Compute best matching patches for this layer, then merge patches into a single feature array of same size. """ - return feature # TODO - patches = self.style_data[layer][0] best_idx, best_val = self.evaluate_slices(layer, feature, variety) better_patches = patches[best_idx].astype(np.float32).transpose((0, 2, 3, 1)) @@ -550,7 +548,7 @@ def evaluate(self, Xn): assert previous_weight + content_weight < 1.0, "Previous and content weight should total below 1.0!" """ - return self.decoders[-1](current_features[0], self.content_map) + return self.decoders[-1](current_features[-1], self.content_map) """" used = 99.9 * len(set(best_idx)) / best_idx.shape[0] From feebb8581d702a588353d133f2b88802488d5362 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sun, 22 May 2016 18:43:39 +0200 Subject: [PATCH 33/58] Averaging the features from other layers before doing the next iteration. 
--- doodle.py | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/doodle.py b/doodle.py index de7a269..d488041 100755 --- a/doodle.py +++ b/doodle.py @@ -131,7 +131,7 @@ def setup_model(self, previous=None): for l in args.layers: self.tensor_latent.append((l, T.tensor4())) # TODO: Move equation to calculate unit numbers into a common function, call from below too. - net['lat'+l] = InputLayer((None, 32 * 2**(int(l[0])-1), None, None), var=self.tensor_latent[-1][1]) + net['lat'+l] = InputLayer((None, min(768, 32 * 2**(int(l[0])-1)), None, None), var=self.tensor_latent[-1][1]) def EncdLayer(previous, channels, filter_size, **params): incoming = net['lat'+previous] if previous in args.layers else net['enc'+previous] @@ -335,7 +335,7 @@ def prepare_style(self, scale=1.0): self.style_img = self.model.prepare_image(style_img_original) self.style_map = style_map_original.transpose((2, 0, 1))[np.newaxis].astype(np.float32) - input_tensors = reversed([('0_0', self.model.tensor_img)] + self.model.tensor_latent[1:]) + input_tensors = self.model.tensor_latent[1:] + [('0_0', self.model.tensor_img)] self.encoders = [] for layer, (input, tensor_latent), shape in zip(args.layers, input_tensors, extend(args.shapes)): output = lasagne.layers.get_output(self.model.network['sem'+layer], @@ -412,8 +412,7 @@ def prepare_generation(self): self.compute_matches = {l: self.compile([self.matcher_history[l]], self.do_match_patches(l)) for l in args.layers} # Decoding intermediate features into more specialized features and all the way to the output image. 
- input_tensors = reversed([('0_0', self.model.tensor_img)] + self.model.tensor_latent[1:]) - self.encoders = [] + self.encoders, input_tensors = [], self.model.tensor_latent[1:] + [('0_0', self.model.tensor_img)] for layer, (input, tensor_latent) in zip(args.layers, input_tensors): layer = lasagne.layers.get_output(self.model.network['enc'+layer], {self.model.network['lat'+input]: tensor_latent, @@ -422,7 +421,7 @@ def prepare_generation(self): self.encoders.append(fn) self.decoders, output_layers = [], (['dec'+l for l in args.layers[1:]] + ['out']) - for layer, (_, tensor_latent), output in zip(args.layers, self.model.tensor_latent, output_layers): + for layer, (tt, tensor_latent), output in zip(args.layers, self.model.tensor_latent, output_layers): output = output.replace('_1', '_2') layer = lasagne.layers.get_output(self.model.network[output], {self.model.network['lat'+layer]: tensor_latent, @@ -504,11 +503,13 @@ def evaluate_slices(self, l, f, v): def evaluate_feature(self, layer, feature, variety=0.0): """Compute best matching patches for this layer, then merge patches into a single feature array of same size. 
""" + print('layer', layer, '...', end='', flush=True) patches = self.style_data[layer][0] best_idx, best_val = self.evaluate_slices(layer, feature, variety) better_patches = patches[best_idx].astype(np.float32).transpose((0, 2, 3, 1)) better_shape = feature.shape[2:] + (feature.shape[1],) better_feature = reconstruct_from_patches_2d(better_patches, better_shape) + print(' done!') return better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] def evaluate_merge(self, features): @@ -518,7 +519,18 @@ def evaluate_merge(self, features): + content_weight * content_feature + previous_weight * previous_feature \ + noise_weight * np.random.normal(0.0, 1.0, size=previous_feature.shape).astype(np.float32) """ - return features + + print('features', [f.shape for f in features]) + decoded1 = [decode(data, self.content_map) for decode, data in zip(self.decoders, features[:-1])] + encoded1 = [encode(data, self.content_map) for encode, data in zip(self.encoders, features[+1:])] + + decoded2 = [decode(data, self.content_map) for decode, data in zip(self.decoders[+1:], decoded1[:-1])] + encoded2 = [encode(data, self.content_map) for encode, data in zip(self.encoders[:-1], encoded1[+1:])] + + decoded1[-1] = (decoded1[-1] + decoded2[-1]) * 0.5 + encoded1[+0] = (encoded1[+0] + encoded2[+0]) * 0.5 + + return [(features[0] + encoded1[0]) / 2.0, (features[1] + encoded1[1] + decoded1[0]) / 3.0, (features[2] + decoded1[1]) / 2.0] def evaluate(self, Xn): """Feed-forward evaluation of the output based on current image. Can be called multiple times. @@ -548,6 +560,13 @@ def evaluate(self, Xn): assert previous_weight + content_weight < 1.0, "Previous and content weight should total below 1.0!" 
""" + """ + data = current_features[0] + data = self.decoders[0](data, self.content_map) + data = (data + current_features[1]) * 0.5 + data = self.decoders[1](data, self.content_map) + output = (data + current_features[2]) * 0.5 + """ return self.decoders[-1](current_features[-1], self.content_map) """" From 8eddd557b12bf27b279420d84167b303fc20698d Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sun, 22 May 2016 21:19:53 +0200 Subject: [PATCH 34/58] Re-introducing logging and command-line arguments. --- doodle.py | 65 +++++++++++++++++++++---------------------------------- 1 file changed, 25 insertions(+), 40 deletions(-) diff --git a/doodle.py b/doodle.py index d488041..845fc1e 100755 --- a/doodle.py +++ b/doodle.py @@ -503,13 +503,21 @@ def evaluate_slices(self, l, f, v): def evaluate_feature(self, layer, feature, variety=0.0): """Compute best matching patches for this layer, then merge patches into a single feature array of same size. """ - print('layer', layer, '...', end='', flush=True) + iter_time = time.time() + patches = self.style_data[layer][0] best_idx, best_val = self.evaluate_slices(layer, feature, variety) better_patches = patches[best_idx].astype(np.float32).transpose((0, 2, 3, 1)) better_shape = feature.shape[2:] + (feature.shape[1],) better_feature = reconstruct_from_patches_2d(better_patches, better_shape) - print(' done!') + + used = 99.9 * len(set(best_idx)) / best_idx.shape[0] + dups = 99.9 * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] + err = best_val.mean() + print(' {}layer{} {:>3} {}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'\ + .format(ansi.BOLD, ansi.ENDC, layer, ansi.BOLD, ansi.ENDC, used, dups, + ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) + return better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] def evaluate_merge(self, features): @@ -517,68 +525,46 @@ def evaluate_merge(self, features): # The new set of features is a blend 
of matched patches, input image, and previous layer. desired_feature = (1.0 - previous_weight - content_weight) * better_features \ + content_weight * content_feature + previous_weight * previous_feature \ - + noise_weight * np.random.normal(0.0, 1.0, size=previous_feature.shape).astype(np.float32) + + """ - print('features', [f.shape for f in features]) decoded1 = [decode(data, self.content_map) for decode, data in zip(self.decoders, features[:-1])] encoded1 = [encode(data, self.content_map) for encode, data in zip(self.encoders, features[+1:])] decoded2 = [decode(data, self.content_map) for decode, data in zip(self.decoders[+1:], decoded1[:-1])] encoded2 = [encode(data, self.content_map) for encode, data in zip(self.encoders[:-1], encoded1[+1:])] - + decoded1[-1] = (decoded1[-1] + decoded2[-1]) * 0.5 encoded1[+0] = (encoded1[+0] + encoded2[+0]) * 0.5 - - return [(features[0] + encoded1[0]) / 2.0, (features[1] + encoded1[1] + decoded1[0]) / 3.0, (features[2] + decoded1[1]) / 2.0] + exchanged = [encoded1[0], (encoded1[1] + decoded1[0]) / 2.0, decoded1[1]] + + params, result = zip(*[extend(a) for a in [args.content_weight, args.previous_weight, args.noise_weight]]), [] + for f, c, e, p in zip(features, self.content_features, exchanged, params): + content_weight, previous_weight, noise_weight = p + mixed = f * (1.0 - content_weight - previous_weight) + c * content_weight + e * previous_weight \ + + np.random.normal(0.0, 1.0, size=f.shape).astype(np.float32) * noise_weight + result.append(mixed) + return result def evaluate(self, Xn): """Feed-forward evaluation of the output based on current image. Can be called multiple times. 
""" frame = 0 - layer_params = [args.iterations, args.content_weight, args.previous_weight, args.noise_weight, args.variety] + # # parameters = zip(args.layers, *[extend(a) for a in layer_params]) - """ - weights = 'c={:0.1f} p={:0.1f} n={:0.1f}'.format(content_weight, previous_weight, noise_weight) - print('\n{}Phase {}{}: variety {}, weights {}, iterations {}.{}'\ - .format(ansi.CYAN_B, l, ansi.CYAN, variety, weights, iterations, ansi.ENDC)) - channels, iter_time = self.model.channels[l], time.time() - """ - current_features = [np.copy(f) for f in self.content_features] self.render(frame, args.layers[0], current_features[0]) for j in range(args.iterations): + frame += 1 + print('\n{}Iteration {}{}: variety {}, weights {}.{}'.format(ansi.CYAN_B, frame, ansi.CYAN, 0.0, 0.0, ansi.ENDC)) desired_features = [self.evaluate_feature(l, f) for l, f in zip(args.layers, current_features)] current_features = self.evaluate_merge(desired_features) + self.render(frame, args.layers[-1], current_features[-1]) - """ - for parameter, content_feature, compute in zip(parameters, self.content_features, self.decoders): - l, iterations, content_weight, previous_weight, noise_weight, variety = parameter - desired_feature, patches = np.copy(previous_feature), self.style_data[l][0] - assert previous_weight + content_weight < 1.0, "Previous and content weight should total below 1.0!" 
- """ - - """ - data = current_features[0] - data = self.decoders[0](data, self.content_map) - data = (data + current_features[1]) * 0.5 - data = self.decoders[1](data, self.content_map) - output = (data + current_features[2]) * 0.5 - """ return self.decoders[-1](current_features[-1], self.content_map) - """" - used = 99.9 * len(set(best_idx)) / best_idx.shape[0] - dups = 99.9 * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] - err, frame = best_val.mean(), frame + 1 - print('{:>3} {}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'.format(frame, ansi.BOLD, ansi.ENDC, used, dups, ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) - - self.render(frame, l, desired_feature) - iter_time = time.time() - """ - def render(self, frame, layer, features): """Decode features at a specific layer and save the result to disk for visualization. (Takes 50% more time.) """ @@ -593,7 +579,6 @@ def render(self, frame, layer, features): def run(self): """The main entry point for the application, runs through multiple phases at increasing resolutions. """ - self.model.setup(layers=['enc'+l for l in args.layers] + ['sem'+l for l in args.layers] + ['dec'+l for l in args.layers]) self.prepare_style() self.prepare_content() From bc10b01181c15c23567d07049e092add7b66c2ab Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Mon, 23 May 2016 00:35:49 +0200 Subject: [PATCH 35/58] Support for semantic maps again. 
--- doodle.py | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/doodle.py b/doodle.py index 845fc1e..fa11a1d 100755 --- a/doodle.py +++ b/doodle.py @@ -188,7 +188,7 @@ def DecdLayer(copy, previous, channels, **params): net['out'] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) def ConcatenateLayer(incoming, layer): - return ConcatLayer([incoming, net['map%i'%(int(layer[0])-1)]]) if args.semantic_weight > 0.0 else incoming + return ConcatLayer([incoming, net['map%i'%int(layer[0])]]) if args.semantic_weight > 0.0 else incoming # Auxiliary network for the semantic layers, and the nearest neighbors calculations. for layer, upper, lower in zip(args.layers, [None] + args.layers[:-1], args.layers[1:] + [None]): @@ -196,7 +196,6 @@ def ConcatenateLayer(incoming, layer): net['sem'+layer] = ConcatenateLayer(net['enc'+layer], layer) net['dup'+layer] = InputLayer(net['enc'+layer].output_shape) net['nn'+layer] = ConvLayer(ConcatenateLayer(net['dup'+layer], layer), 1, 3, b=None, pad=0, flip_filters=False) - self.network = net def load_data(self): @@ -348,6 +347,7 @@ def prepare_style(self, scale=1.0): self.style_data, feature = {}, self.style_img for layer, encoder in reversed(list(zip(args.layers, self.encoders))): feature, *data = encoder(feature, self.style_map) + feature = feature[:,:self.model.channels[layer]] patches, l = data[0], self.model.network['nn'+layer] l.num_filters = patches.shape[0] // args.slices self.style_data[layer] = [d[:l.num_filters*args.slices].astype(np.float16) for d in data]\ @@ -397,6 +397,7 @@ def prepare_content(self, scale=1.0): self.content_features, feature = [], self.content_img for layer, encoder in reversed(list(zip(args.layers, self.encoders))): feature, *_ = encoder(feature, self.content_map) + feature = feature[:,:self.model.channels[layer]] self.content_features.insert(0, feature) print(' - Layer {} as {} array in 
{:,}kb.'.format(layer, feature.shape[1:], feature.size//1000)) @@ -407,9 +408,11 @@ def prepare_generation(self): self.matcher_tensors = {l: lasagne.utils.shared_empty(dim=4) for l in args.layers} self.matcher_history = {l: T.vector() for l in args.layers} self.matcher_inputs = {self.model.network['dup'+l]: self.matcher_tensors[l] for l in args.layers} + self.matcher_inputs.update({self.model.network['map']: self.model.tensor_map}) nn_layers = [self.model.network['nn'+l] for l in args.layers] self.matcher_outputs = dict(zip(args.layers, lasagne.layers.get_output(nn_layers, self.matcher_inputs))) - self.compute_matches = {l: self.compile([self.matcher_history[l]], self.do_match_patches(l)) for l in args.layers} + self.compute_matches = {l: self.compile([self.matcher_history[l], self.model.tensor_map], + self.do_match_patches(l)) for l in args.layers} # Decoding intermediate features into more specialized features and all the way to the output image. self.encoders, input_tensors = [], self.model.tensor_latent[1:] + [('0_0', self.model.tensor_img)] @@ -489,7 +492,7 @@ def evaluate_slices(self, l, f, v): self.normalize_components(l, weights, (bi, bs)) layer.W.set_value(weights) - cur_idx, cur_val, cur_match = self.compute_matches[l](history[idx]) + cur_idx, cur_val, cur_match = self.compute_matches[l](history[idx], self.content_map) if best_idx is None: best_idx, best_val = cur_idx, cur_val else: @@ -507,7 +510,7 @@ def evaluate_feature(self, layer, feature, variety=0.0): patches = self.style_data[layer][0] best_idx, best_val = self.evaluate_slices(layer, feature, variety) - better_patches = patches[best_idx].astype(np.float32).transpose((0, 2, 3, 1)) + better_patches = patches[best_idx,:self.model.channels[layer]].astype(np.float32).transpose((0, 2, 3, 1)) better_shape = feature.shape[2:] + (feature.shape[1],) better_feature = reconstruct_from_patches_2d(better_patches, better_shape) @@ -521,13 +524,6 @@ def evaluate_feature(self, layer, feature, variety=0.0): return 
better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] def evaluate_merge(self, features): - """ - # The new set of features is a blend of matched patches, input image, and previous layer. - desired_feature = (1.0 - previous_weight - content_weight) * better_features \ - + content_weight * content_feature + previous_weight * previous_feature \ - + - """ - decoded1 = [decode(data, self.content_map) for decode, data in zip(self.decoders, features[:-1])] encoded1 = [encode(data, self.content_map) for encode, data in zip(self.encoders, features[+1:])] @@ -550,17 +546,15 @@ def evaluate(self, Xn): """Feed-forward evaluation of the output based on current image. Can be called multiple times. """ frame = 0 - # - # parameters = zip(args.layers, *[extend(a) for a in layer_params]) - current_features = [np.copy(f) for f in self.content_features] self.render(frame, args.layers[0], current_features[0]) for j in range(args.iterations): frame += 1 print('\n{}Iteration {}{}: variety {}, weights {}.{}'.format(ansi.CYAN_B, frame, ansi.CYAN, 0.0, 0.0, ansi.ENDC)) - desired_features = [self.evaluate_feature(l, f) for l, f in zip(args.layers, current_features)] - current_features = self.evaluate_merge(desired_features) + for i in range(2): + current_features = [self.evaluate_feature(l, f, v) for l, f, v in zip(args.layers, current_features, extend(args.variety))] + current_features = self.evaluate_merge(current_features) self.render(frame, args.layers[-1], current_features[-1]) return self.decoders[-1](current_features[-1], self.content_map) From c1ddad5ab358af0d2a8e34f796aa3a7f6c9a17a4 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Mon, 23 May 2016 14:32:49 +0200 Subject: [PATCH 36/58] Support for arbitrary layer numbers in feature exchange code. 
--- doodle.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/doodle.py b/doodle.py index fa11a1d..411e89e 100755 --- a/doodle.py +++ b/doodle.py @@ -26,6 +26,7 @@ import pickle import argparse import itertools +import collections # Configure all options first so we can later custom-load other libraries (Theano) based on device specified by user. @@ -524,16 +525,16 @@ def evaluate_feature(self, layer, feature, variety=0.0): return better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] def evaluate_merge(self, features): - decoded1 = [decode(data, self.content_map) for decode, data in zip(self.decoders, features[:-1])] - encoded1 = [encode(data, self.content_map) for encode, data in zip(self.encoders, features[+1:])] - - decoded2 = [decode(data, self.content_map) for decode, data in zip(self.decoders[+1:], decoded1[:-1])] - encoded2 = [encode(data, self.content_map) for encode, data in zip(self.encoders[:-1], encoded1[+1:])] - - decoded1[-1] = (decoded1[-1] + decoded2[-1]) * 0.5 - encoded1[+0] = (encoded1[+0] + encoded2[+0]) * 0.5 - exchanged = [encoded1[0], (encoded1[1] + decoded1[0]) / 2.0, decoded1[1]] + decoded, encoded, ready = features, features, collections.defaultdict(list) + for i in range(len(features)-1): + print('merge', i) + decoded = [decode(data, self.content_map) for decode, data in zip(self.decoders[+i:len(self.decoders)], decoded[:-1])] + encoded = [encode(data, self.content_map) for encode, data in zip(self.encoders[:len(self.encoders)-i], encoded[+1:])] + for d in decoded: ready[d.shape].append(d) + for e in encoded: ready[e.shape].append(e) + exchanged = [sum(ready[f.shape]) / len(ready[f.shape]) for f in features] + params, result = zip(*[extend(a) for a in [args.content_weight, args.previous_weight, args.noise_weight]]), [] for f, c, e, p in zip(features, self.content_features, exchanged, params): content_weight, previous_weight, noise_weight = p @@ -552,7 +553,7 @@ def evaluate(self, Xn): 
for j in range(args.iterations): frame += 1 print('\n{}Iteration {}{}: variety {}, weights {}.{}'.format(ansi.CYAN_B, frame, ansi.CYAN, 0.0, 0.0, ansi.ENDC)) - for i in range(2): + for i in range(1): current_features = [self.evaluate_feature(l, f, v) for l, f, v in zip(args.layers, current_features, extend(args.variety))] current_features = self.evaluate_merge(current_features) self.render(frame, args.layers[-1], current_features[-1]) From ac03badd5648b85e273aa0f7e82321386e5500a6 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Mon, 23 May 2016 22:59:13 +0200 Subject: [PATCH 37/58] Re-ordering of the exchange/merge operations. --- doodle.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/doodle.py b/doodle.py index 411e89e..c63a0e0 100755 --- a/doodle.py +++ b/doodle.py @@ -524,21 +524,21 @@ def evaluate_feature(self, layer, feature, variety=0.0): return better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] - def evaluate_merge(self, features): - decoded, encoded, ready = features, features, collections.defaultdict(list) - + def evaluate_exchange(self, features): + decoded, encoded, ready = features, features, {f.shape: [f] for f in features} for i in range(len(features)-1): - print('merge', i) decoded = [decode(data, self.content_map) for decode, data in zip(self.decoders[+i:len(self.decoders)], decoded[:-1])] encoded = [encode(data, self.content_map) for encode, data in zip(self.encoders[:len(self.encoders)-i], encoded[+1:])] for d in decoded: ready[d.shape].append(d) for e in encoded: ready[e.shape].append(e) - exchanged = [sum(ready[f.shape]) / len(ready[f.shape]) for f in features] + # TODO: Weighted contribution of features of this layer with other layers... 
+ return [sum(ready.get(f.shape, [f])) / len(ready.get(f.shape, [f])) for f in features] - params, result = zip(*[extend(a) for a in [args.content_weight, args.previous_weight, args.noise_weight]]), [] - for f, c, e, p in zip(features, self.content_features, exchanged, params): - content_weight, previous_weight, noise_weight = p - mixed = f * (1.0 - content_weight - previous_weight) + c * content_weight + e * previous_weight \ + def evaluate_merge(self, features): + params, result = zip(*[extend(a) for a in [args.content_weight, args.noise_weight]]), [] + for f, c, p in zip(features, self.content_features, params): + content_weight, noise_weight = p + mixed = f * (1.0 - content_weight) + c * content_weight \ + np.random.normal(0.0, 1.0, size=f.shape).astype(np.float32) * noise_weight result.append(mixed) return result @@ -553,9 +553,9 @@ def evaluate(self, Xn): for j in range(args.iterations): frame += 1 print('\n{}Iteration {}{}: variety {}, weights {}.{}'.format(ansi.CYAN_B, frame, ansi.CYAN, 0.0, 0.0, ansi.ENDC)) - for i in range(1): - current_features = [self.evaluate_feature(l, f, v) for l, f, v in zip(args.layers, current_features, extend(args.variety))] current_features = self.evaluate_merge(current_features) + current_features = [self.evaluate_feature(l, f, v) for l, f, v in zip(args.layers, current_features, extend(args.variety))] + current_features = self.evaluate_exchange(current_features) self.render(frame, args.layers[-1], current_features[-1]) return self.decoders[-1](current_features[-1], self.content_map) From 1f969b756e086ee906eb39b101d7af04e62e510a Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sun, 29 May 2016 13:00:57 +0200 Subject: [PATCH 38/58] Switch to new network architecture, simplified layer handling with integers. 
--- doodle.py | 164 +++++++++++++++++++++++++----------------------------- 1 file changed, 77 insertions(+), 87 deletions(-) diff --git a/doodle.py b/doodle.py index c63a0e0..7a8aee3 100755 --- a/doodle.py +++ b/doodle.py @@ -35,11 +35,11 @@ add_arg = parser.add_argument add_arg('--content', default=None, type=str, help='Subject image path to repaint in new style.') add_arg('--style', default=None, type=str, help='Texture image path to extract patches from.') -add_arg('--layers', default=['5_1','4_1','3_1'], nargs='+', type=str, help='The layers/scales to process.') -add_arg('--variety', default=[0.2, 0.1, 0.0], nargs='+', type=float, help='Bias selecting diverse patches') -add_arg('--previous-weight', default=[0.0, 0.2], nargs='+', type=float, help='Weight of previous layer features.') -add_arg('--content-weight', default=[0.0], nargs='+', type=float, help='Weight of input content features each layer.') -add_arg('--noise-weight', default=[0.0], nargs='+', type=float, help='Weight of noise added into features.') +add_arg('--variety', default=[.2,.1,.0], nargs='+', type=float, help='Bias selecting diverse patches') +add_arg('--layers', default=[5, 4, 3], nargs='+', type=int, help='The layers/scales to process.') +add_arg('--layer-weight', default=[1.0], nargs='+', type=float, help='Weight of previous layer features.') +add_arg('--content-weight', default=[0.3], nargs='+', type=float, help='Weight of input content features each layer.') +add_arg('--noise-weight', default=[0.1], nargs='+', type=float, help='Weight of noise added into features.') add_arg('--iterations', default=1, type=int, help='Number of iterations to run in each phase.') add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') @@ -74,7 +74,7 @@ def error(message, *lines): sys.exit(-1) def extend(lst): return itertools.chain(lst, 
itertools.repeat(lst[-1])) -def snap(value, grid=2**(int(args.layers[0][0])-1)): return int(grid * math.floor(value / grid)) +def snap(value, grid=2**(args.layers[0]-1)): return int(grid * math.floor(value / grid)) print("""{} {}High-quality image synthesis powered by Deep Learning!{} - Code licensed as AGPLv3, models under CC BY-NC-SA.{}""".format(ansi.CYAN_B, __doc__, ansi.CYAN, ansi.ENDC)) @@ -123,96 +123,86 @@ def setup_model(self, previous=None): and then adding augmentations for Semantic Style Transfer. """ net, self.channels = {}, {} + self.units = {1: 32, 2: 56, 3: 88, 4: 136, 5: 224, 6: 360} - net['map'] = InputLayer((1, 1, None, None)) + net['map'] = InputLayer((1, None, None, None)) for j in range(6): net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad') self.tensor_latent = [] for l in args.layers: - self.tensor_latent.append((l, T.tensor4())) - # TODO: Move equation to calculate unit numbers into a common function, call from below too. - net['lat'+l] = InputLayer((None, min(768, 32 * 2**(int(l[0])-1)), None, None), var=self.tensor_latent[-1][1]) + self.tensor_latent.append((str(l), T.tensor4())) + net['lat%i'%l] = InputLayer((None, self.units[l], None, None), var=self.tensor_latent[-1][1]) - def EncdLayer(previous, channels, filter_size, **params): - incoming = net['lat'+previous] if previous in args.layers else net['enc'+previous] - return ConvLayer(incoming, channels, filter_size, **params) + def EncdLayer(previous, channels, filter_size, pad, stride=(1,1), nonlinearity=lasagne.nonlinearities.elu): + incoming = net['lat'+previous[0]] if int(previous[0]) in args.layers and previous[1:] == '_1' else net['enc'+previous] + return ConvLayer(incoming, channels, filter_size, pad=pad, stride=stride, nonlinearity=nonlinearity) - custom = {'nonlinearity': lasagne.nonlinearities.elu} # Encoder part of the neural network, takes an input image and turns it into abstract patterns. 
net['img'] = previous or InputLayer((None, 3, None, None)) - net['enc0_0'], net['lat0_0'] = net['img'], net['img'] - net['enc1_1'] = EncdLayer('0_0', 32, 3, pad=1, **custom) - net['enc1_2'] = EncdLayer('1_1', 32, 3, pad=1, **custom) - net['enc2_1'] = EncdLayer('1_2', 64, 2, pad=0, stride=(2,2), **custom) - net['enc2_2'] = EncdLayer('2_1', 64, 3, pad=1, **custom) - net['enc3_1'] = EncdLayer('2_2', 128, 2, pad=0, stride=(2,2), **custom) - net['enc3_2'] = EncdLayer('3_1', 128, 3, pad=1, **custom) - net['enc3_3'] = EncdLayer('3_2', 128, 3, pad=1, **custom) - net['enc3_4'] = EncdLayer('3_3', 128, 3, pad=1, **custom) - net['enc4_1'] = EncdLayer('3_4', 256, 2, pad=0, stride=(2,2), **custom) - net['enc4_2'] = EncdLayer('4_1', 256, 3, pad=1, **custom) - net['enc4_3'] = EncdLayer('4_2', 256, 3, pad=1, **custom) - net['enc4_4'] = EncdLayer('4_3', 256, 3, pad=1, **custom) - net['enc5_1'] = EncdLayer('4_4', 512, 2, pad=0, stride=(2,2), **custom) - net['enc5_2'] = EncdLayer('5_1', 512, 3, pad=1, **custom) - net['enc5_3'] = EncdLayer('5_2', 512, 3, pad=1, **custom) - net['enc5_4'] = EncdLayer('5_3', 512, 3, pad=1, **custom) - net['enc6_1'] = EncdLayer('5_4', 768, 2, pad=0, stride=(2,2), **custom) - - def DecdLayer(copy, previous, channels, **params): - # Dynamically injects intermediate "pitstop" output layers in the decoder based on what the user - # specified as layers. It's rather inelegant... Needs a rework! 
- dup, incoming = net['enc'+copy], net['lat'+copy] if copy in args.layers else net[previous] - return DeconvLayer(incoming, channels, dup.filter_size, stride=dup.stride, crop=dup.pad, - nonlinearity=params.get('nonlinearity', lasagne.nonlinearities.elu)) + net['enc0_0'], net['lat0'] = net['img'], net['img'] + net['enc1_1'] = EncdLayer('0_0', 32, 3, pad=1) + net['enc1_2'] = EncdLayer('1_1', 32, 3, pad=1) + net['enc2_1'] = EncdLayer('1_2', 56, 2, pad=0, stride=(2,2)) + net['enc2_2'] = EncdLayer('2_1', 56, 3, pad=1) + net['enc3_1'] = EncdLayer('2_2', 88, 2, pad=0, stride=(2,2)) + net['enc3_2'] = EncdLayer('3_1', 88, 3, pad=1) + net['enc3_3'] = EncdLayer('3_2', 88, 3, pad=1) + net['enc4_1'] = EncdLayer('3_3', 136, 2, pad=0, stride=(2,2)) + net['enc4_2'] = EncdLayer('4_1', 136, 3, pad=1) + net['enc4_3'] = EncdLayer('4_2', 136, 3, pad=1) + net['enc5_1'] = EncdLayer('4_3', 224, 2, pad=0, stride=(2,2)) + net['enc5_2'] = EncdLayer('5_1', 224, 3, pad=1) + net['enc5_3'] = EncdLayer('5_2', 224, 3, pad=1) + net['enc6_1'] = EncdLayer('5_3', 360, 2, pad=0, stride=(2,2)) + + def DecdLayer(copy, previous, channels, nonlinearity=lasagne.nonlinearities.elu): + # Dynamically injects intermediate "pitstop" output layers in the decoder based on what the user specified as layers. + dup, incoming = net['enc'+copy], net['lat'+copy[0]] if int(copy[0]) in args.layers and copy[1:] == '_1' else net[previous] + return DeconvLayer(incoming, channels, dup.filter_size, stride=dup.stride, crop=dup.pad, nonlinearity=nonlinearity) # Decoder part of the neural network, takes abstract patterns and converts them into an image! 
- net['dec6_1'] = DecdLayer('6_1', 'enc6_1', 512) - net['dec5_4'] = DecdLayer('5_4', 'dec6_1', 512) - net['dec5_3'] = DecdLayer('5_3', 'dec5_4', 512) - net['dec5_2'] = DecdLayer('5_2', 'dec5_3', 512) - net['dec5_1'] = DecdLayer('5_1', 'dec5_2', 256) - net['dec4_4'] = DecdLayer('4_4', 'dec5_1', 256) - net['dec4_3'] = DecdLayer('4_3', 'dec4_4', 256) - net['dec4_2'] = DecdLayer('4_2', 'dec4_3', 256) - net['dec4_1'] = DecdLayer('4_1', 'dec4_2', 128) - net['dec3_4'] = DecdLayer('3_4', 'dec4_1', 128) - net['dec3_3'] = DecdLayer('3_3', 'dec3_4', 128) - net['dec3_2'] = DecdLayer('3_2', 'dec3_3', 128) - net['dec3_1'] = DecdLayer('3_1', 'dec3_2', 64) - net['dec2_2'] = DecdLayer('2_2', 'dec3_1', 64) - net['dec2_1'] = DecdLayer('2_1', 'dec2_2', 32) - net['dec1_2'] = DecdLayer('1_2', 'dec2_1', 32) - net['dec1_1'] = DecdLayer('1_1', 'dec1_2', 3, nonlinearity=lasagne.nonlinearities.tanh) - net['dec0_0'] = lasagne.layers.ScaleLayer(net['dec1_1']) + net['dec5_3'] = DecdLayer('6_1', 'enc6_1', 224) + net['dec5_2'] = DecdLayer('5_3', 'dec5_3', 224) + net['dec5_1'] = DecdLayer('5_2', 'dec5_2', 224) + net['dec4_3'] = DecdLayer('5_1', 'dec5_1', 136) + net['dec4_2'] = DecdLayer('4_3', 'dec4_3', 136) + net['dec4_1'] = DecdLayer('4_2', 'dec4_2', 136) + net['dec3_3'] = DecdLayer('4_1', 'dec4_1', 88) + net['dec3_2'] = DecdLayer('3_3', 'dec3_3', 88) + net['dec3_1'] = DecdLayer('3_2', 'dec3_2', 88) + net['dec2_2'] = DecdLayer('3_1', 'dec3_1', 56) + net['dec2_1'] = DecdLayer('2_2', 'dec2_2', 56) + net['dec1_2'] = DecdLayer('2_1', 'dec2_1', 32) + net['dec1_1'] = DecdLayer('1_2', 'dec1_2', 32) + net['dec0_1'] = DecdLayer('1_1', 'dec1_1', 3, nonlinearity=lasagne.nonlinearities.tanh) + net['dec0_0'] = lasagne.layers.ScaleLayer(net['dec0_1'], shared_axes=(0,1,2,3)) net['out'] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) def ConcatenateLayer(incoming, layer): - return ConcatLayer([incoming, net['map%i'%int(layer[0])]]) if 
args.semantic_weight > 0.0 else incoming + return ConcatLayer([incoming, net['map%i'%layer]]) if args.semantic_weight > 0.0 else incoming # Auxiliary network for the semantic layers, and the nearest neighbors calculations. for layer, upper, lower in zip(args.layers, [None] + args.layers[:-1], args.layers[1:] + [None]): - self.channels[layer] = net['enc'+layer].num_filters - net['sem'+layer] = ConcatenateLayer(net['enc'+layer], layer) - net['dup'+layer] = InputLayer(net['enc'+layer].output_shape) - net['nn'+layer] = ConvLayer(ConcatenateLayer(net['dup'+layer], layer), 1, 3, b=None, pad=0, flip_filters=False) + self.channels[layer] = net['enc%i_1'%layer].num_filters + net['sem%i'%layer] = ConcatenateLayer(net['enc%i_1'%layer], layer) + net['dup%i'%layer] = InputLayer(net['enc%i_1'%layer].output_shape) + net['nn%i'%layer] = ConvLayer(ConcatenateLayer(net['dup%i'%layer], layer), 1, 3, b=None, pad=0, flip_filters=False) self.network = net def load_data(self): """Open the serialized parameters from a pre-trained network, and load them into the model created. """ - data_file = os.path.join(os.path.dirname(__file__), 'gelu2_conv.pkl') + data_file = os.path.join(os.path.dirname(__file__), 'gelu3_conv.pkl') if not os.path.exists(data_file): error("Model file with pre-trained convolution layers not found. 
Download from here...", - "https://github.com/alexjc/neural-doodle/releases/download/v0.0/gelu2_conv.pkl") + "https://github.com/alexjc/neural-doodle/releases/download/v0.0/gelu3_conv.pkl") data = pickle.load(open(data_file, 'rb')) for layer, values in data.items(): - assert layer in self.network, "Layer `{}` not found as expected.".format(layer) for p, v in zip(self.network[layer].get_params(), values): assert p.get_value().shape == v.shape, "Layer `{}` in network has size {} but data is {}."\ - .format(layer, v.shape, p.get_value().shape) + .format(layer, p.get_value().shape, v.shape) p.set_value(v.astype(np.float32)) def setup(self, layers): @@ -335,10 +325,10 @@ def prepare_style(self, scale=1.0): self.style_img = self.model.prepare_image(style_img_original) self.style_map = style_map_original.transpose((2, 0, 1))[np.newaxis].astype(np.float32) - input_tensors = self.model.tensor_latent[1:] + [('0_0', self.model.tensor_img)] + input_tensors = self.model.tensor_latent[1:] + [('0', self.model.tensor_img)] self.encoders = [] for layer, (input, tensor_latent), shape in zip(args.layers, input_tensors, extend(args.shapes)): - output = lasagne.layers.get_output(self.model.network['sem'+layer], + output = lasagne.layers.get_output(self.model.network['sem%i'%layer], {self.model.network['lat'+input]: tensor_latent, self.model.network['map']: self.model.tensor_map}) fn = self.compile([tensor_latent, self.model.tensor_map], [output] + self.do_extract_patches([layer], [output], [shape])) @@ -349,7 +339,7 @@ def prepare_style(self, scale=1.0): for layer, encoder in reversed(list(zip(args.layers, self.encoders))): feature, *data = encoder(feature, self.style_map) feature = feature[:,:self.model.channels[layer]] - patches, l = data[0], self.model.network['nn'+layer] + patches, l = data[0], self.model.network['nn%i'%layer] l.num_filters = patches.shape[0] // args.slices self.style_data[layer] = [d[:l.num_filters*args.slices].astype(np.float16) for d in data]\ + 
[np.zeros((patches.shape[0],), dtype=np.float16)] @@ -384,7 +374,7 @@ def prepare_content(self, scale=1.0): if content_img_original is None: print(" - No content image found; seed was set to random noise.") - content_img_original = np.random.uniform(0, 255, content_map_original.shape[:2]+(3,)).astype(np.float32) + content_img_original = np.random.uniform(0, 64, content_map_original.shape[:2]+(3,)).astype(np.float32) if content_map_original.shape[2] != self.style_map.shape[1]: error("Mismatch in number of channels for style and content semantic map.", @@ -408,27 +398,26 @@ def prepare_generation(self): # Patch matching calculation that uses only pre-calculated features and a slice of the patches. self.matcher_tensors = {l: lasagne.utils.shared_empty(dim=4) for l in args.layers} self.matcher_history = {l: T.vector() for l in args.layers} - self.matcher_inputs = {self.model.network['dup'+l]: self.matcher_tensors[l] for l in args.layers} + self.matcher_inputs = {self.model.network['dup%i'%l]: self.matcher_tensors[l] for l in args.layers} self.matcher_inputs.update({self.model.network['map']: self.model.tensor_map}) - nn_layers = [self.model.network['nn'+l] for l in args.layers] + nn_layers = [self.model.network['nn%i'%l] for l in args.layers] self.matcher_outputs = dict(zip(args.layers, lasagne.layers.get_output(nn_layers, self.matcher_inputs))) self.compute_matches = {l: self.compile([self.matcher_history[l], self.model.tensor_map], self.do_match_patches(l)) for l in args.layers} # Decoding intermediate features into more specialized features and all the way to the output image. 
- self.encoders, input_tensors = [], self.model.tensor_latent[1:] + [('0_0', self.model.tensor_img)] - for layer, (input, tensor_latent) in zip(args.layers, input_tensors): - layer = lasagne.layers.get_output(self.model.network['enc'+layer], + self.encoders, input_tensors = [], self.model.tensor_latent[1:] + [('0', self.model.tensor_img)] + for name, (input, tensor_latent) in zip(args.layers, input_tensors): + layer = lasagne.layers.get_output(self.model.network['enc%i_1'%name], {self.model.network['lat'+input]: tensor_latent, self.model.network['map']: self.model.tensor_map}) fn = self.compile([tensor_latent, self.model.tensor_map], layer) self.encoders.append(fn) - self.decoders, output_layers = [], (['dec'+l for l in args.layers[1:]] + ['out']) - for layer, (tt, tensor_latent), output in zip(args.layers, self.model.tensor_latent, output_layers): - output = output.replace('_1', '_2') + self.decoders, output_layers = [], (['dec%i_1'%l for l in args.layers[1:]] + ['out']) + for name, (input, tensor_latent), output in zip(args.layers, self.model.tensor_latent, output_layers): layer = lasagne.layers.get_output(self.model.network[output], - {self.model.network['lat'+layer]: tensor_latent, + {self.model.network['lat'+input]: tensor_latent, self.model.network['map']: self.model.tensor_map}) fn = self.compile([tensor_latent, self.model.tensor_map], layer) self.decoders.append(fn) @@ -484,7 +473,7 @@ def evaluate_slices(self, l, f, v): self.normalize_components(l, f, self.compute_norms(np, l, f)) self.matcher_tensors[l].set_value(f) - layer, data = self.model.network['nn'+l], self.style_data[l] + layer, data = self.model.network['nn%i'%l], self.style_data[l] history = data[-1] best_idx, best_val = None, 0.0 @@ -518,21 +507,22 @@ def evaluate_feature(self, layer, feature, variety=0.0): used = 99.9 * len(set(best_idx)) / best_idx.shape[0] dups = 99.9 * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] err = best_val.mean() - print(' {}layer{} {:>3} 
{}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'\ + print(' {}layer{} {:>1} {}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'\ .format(ansi.BOLD, ansi.ENDC, layer, ansi.BOLD, ansi.ENDC, used, dups, ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) return better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] def evaluate_exchange(self, features): - decoded, encoded, ready = features, features, {f.shape: [f] for f in features} + decoded, encoded = features, features + weights = {f.shape: w for f, w in zip(features, extend(args.layer_weight))} + ready = {f.shape: [(f, weights[f.shape])] for f in features} for i in range(len(features)-1): decoded = [decode(data, self.content_map) for decode, data in zip(self.decoders[+i:len(self.decoders)], decoded[:-1])] encoded = [encode(data, self.content_map) for encode, data in zip(self.encoders[:len(self.encoders)-i], encoded[+1:])] - for d in decoded: ready[d.shape].append(d) - for e in encoded: ready[e.shape].append(e) - # TODO: Weighted contribution of features of this layer with other layers... - return [sum(ready.get(f.shape, [f])) / len(ready.get(f.shape, [f])) for f in features] + for d in decoded: ready[d.shape].append((d, weights[d.shape])) + for e in encoded: ready[e.shape].append((e, weights[d.shape])) + return [sum([a*w for a, w in ready.get(f.shape, [(f,1.0)])]) / sum([w for _, w in ready.get(f.shape, [(f,1.0)])]) for f in features] def evaluate_merge(self, features): params, result = zip(*[extend(a) for a in [args.content_weight, args.noise_weight]]), [] @@ -574,7 +564,7 @@ def render(self, frame, layer, features): def run(self): """The main entry point for the application, runs through multiple phases at increasing resolutions. 
""" - self.model.setup(layers=['enc'+l for l in args.layers] + ['sem'+l for l in args.layers] + ['dec'+l for l in args.layers]) + self.model.setup(layers=['enc%i_1'%l for l in args.layers] + ['sem%i'%l for l in args.layers] + ['dec%i_1'%l for l in args.layers]) self.prepare_style() self.prepare_content() self.prepare_generation() From f81446a43de31e835bfbd42db2d3d297f78b2f57 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sun, 29 May 2016 21:19:07 +0200 Subject: [PATCH 39/58] Support for micro-iterations within the macro-passes. --- doodle.py | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/doodle.py b/doodle.py index 7a8aee3..9264bf5 100755 --- a/doodle.py +++ b/doodle.py @@ -40,7 +40,8 @@ add_arg('--layer-weight', default=[1.0], nargs='+', type=float, help='Weight of previous layer features.') add_arg('--content-weight', default=[0.3], nargs='+', type=float, help='Weight of input content features each layer.') add_arg('--noise-weight', default=[0.1], nargs='+', type=float, help='Weight of noise added into features.') -add_arg('--iterations', default=1, type=int, help='Number of iterations to run in each phase.') +add_arg('--iterations', default=[1], nargs='+', type=int, help='Number of times to repeat layer optimization.') +add_arg('--passes', default=2, type=int, help='Number of times to go over the whole image.') add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. style features.') @@ -193,13 +194,14 @@ def ConcatenateLayer(incoming, layer): def load_data(self): """Open the serialized parameters from a pre-trained network, and load them into the model created. 
""" - data_file = os.path.join(os.path.dirname(__file__), 'gelu3_conv.pkl') + data_file = os.path.join(os.path.dirname(__file__), 'gelu4_conv.pkl') if not os.path.exists(data_file): error("Model file with pre-trained convolution layers not found. Download from here...", "https://github.com/alexjc/neural-doodle/releases/download/v0.0/gelu3_conv.pkl") data = pickle.load(open(data_file, 'rb')) for layer, values in data.items(): + if layer not in self.network: continue for p, v in zip(self.network[layer].get_params(), values): assert p.get_value().shape == v.shape, "Layer `{}` in network has size {} but data is {}."\ .format(layer, p.get_value().shape, v.shape) @@ -493,25 +495,27 @@ def evaluate_slices(self, l, f, v): return best_idx, best_val - def evaluate_feature(self, layer, feature, variety=0.0): + def evaluate_feature(self, layer, feature, variety=0.0, iterations=1): """Compute best matching patches for this layer, then merge patches into a single feature array of same size. """ iter_time = time.time() - patches = self.style_data[layer][0] - best_idx, best_val = self.evaluate_slices(layer, feature, variety) - better_patches = patches[best_idx,:self.model.channels[layer]].astype(np.float32).transpose((0, 2, 3, 1)) - better_shape = feature.shape[2:] + (feature.shape[1],) - better_feature = reconstruct_from_patches_2d(better_patches, better_shape) - used = 99.9 * len(set(best_idx)) / best_idx.shape[0] - dups = 99.9 * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] - err = best_val.mean() - print(' {}layer{} {:>1} {}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'\ - .format(ansi.BOLD, ansi.ENDC, layer, ansi.BOLD, ansi.ENDC, used, dups, - ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) + for _ in range(iterations): + best_idx, best_val = self.evaluate_slices(layer, feature, variety) + better_patches = patches[best_idx,:self.model.channels[layer]].astype(np.float32).transpose((0, 2, 3, 1)) + 
better_shape = feature.shape[2:] + (feature.shape[1],) + better_feature = reconstruct_from_patches_2d(better_patches, better_shape) + feature = better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] + + used = 99.9 * len(set(best_idx)) / best_idx.shape[0] + dups = 99.9 * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] + err = best_val.mean() + print(' {}layer{} {:>1} {}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'\ + .format(ansi.BOLD, ansi.ENDC, layer, ansi.BOLD, ansi.ENDC, used, dups, + ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) - return better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] + return feature def evaluate_exchange(self, features): decoded, encoded = features, features @@ -536,15 +540,15 @@ def evaluate_merge(self, features): def evaluate(self, Xn): """Feed-forward evaluation of the output based on current image. Can be called multiple times. """ - frame = 0 + frame, extra = 0, [extend(args.variety), extend(args.iterations)] current_features = [np.copy(f) for f in self.content_features] self.render(frame, args.layers[0], current_features[0]) - for j in range(args.iterations): + for j in range(args.passes): frame += 1 print('\n{}Iteration {}{}: variety {}, weights {}.{}'.format(ansi.CYAN_B, frame, ansi.CYAN, 0.0, 0.0, ansi.ENDC)) current_features = self.evaluate_merge(current_features) - current_features = [self.evaluate_feature(l, f, v) for l, f, v in zip(args.layers, current_features, extend(args.variety))] + current_features = [self.evaluate_feature(l, f, *e) for l, f, *e in zip(args.layers, current_features, *extra)] current_features = self.evaluate_exchange(current_features) self.render(frame, args.layers[-1], current_features[-1]) From 90f7db367387b83b179eee78206bfd131f618edb Mon Sep 17 00:00:00 2001 From: "Alex J. 
Champandard" Date: Sun, 29 May 2016 23:48:51 +0200 Subject: [PATCH 40/58] Integrated the feature merging code with the layer evaluation, can use multiple iteration within layers again--helps with speed. --- doodle.py | 82 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 42 insertions(+), 40 deletions(-) diff --git a/doodle.py b/doodle.py index 9264bf5..6b1941c 100755 --- a/doodle.py +++ b/doodle.py @@ -35,14 +35,15 @@ add_arg = parser.add_argument add_arg('--content', default=None, type=str, help='Subject image path to repaint in new style.') add_arg('--style', default=None, type=str, help='Texture image path to extract patches from.') +add_arg('--passes', default=2, type=int, help='Number of times to go over the whole image.') add_arg('--variety', default=[.2,.1,.0], nargs='+', type=float, help='Bias selecting diverse patches') add_arg('--layers', default=[5, 4, 3], nargs='+', type=int, help='The layers/scales to process.') add_arg('--layer-weight', default=[1.0], nargs='+', type=float, help='Weight of previous layer features.') add_arg('--content-weight', default=[0.3], nargs='+', type=float, help='Weight of input content features each layer.') add_arg('--noise-weight', default=[0.1], nargs='+', type=float, help='Weight of noise added into features.') add_arg('--iterations', default=[1], nargs='+', type=int, help='Number of times to repeat layer optimization.') -add_arg('--passes', default=2, type=int, help='Number of times to go over the whole image.') -add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') +add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') +add_arg('--seed', default=None, type=int, help='Initial state for the random number generator.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of 
semantics vs. style features.') add_arg('--output', default='output.png', type=str, help='Filename or path to save output once done.') @@ -194,7 +195,7 @@ def ConcatenateLayer(incoming, layer): def load_data(self): """Open the serialized parameters from a pre-trained network, and load them into the model created. """ - data_file = os.path.join(os.path.dirname(__file__), 'gelu4_conv.pkl') + data_file = os.path.join(os.path.dirname(__file__), 'gelu3_conv.pkl') if not os.path.exists(data_file): error("Model file with pre-trained convolution layers not found. Download from here...", "https://github.com/alexjc/neural-doodle/releases/download/v0.0/gelu3_conv.pkl") @@ -249,6 +250,7 @@ def __init__(self): """Constructor sets up global variables, loads and validates files, then builds the model. """ self.start_time = time.time() + np.random.seed(args.seed) # Prepare file output and load files specified as input. if args.frames is not False: @@ -344,7 +346,7 @@ def prepare_style(self, scale=1.0): patches, l = data[0], self.model.network['nn%i'%layer] l.num_filters = patches.shape[0] // args.slices self.style_data[layer] = [d[:l.num_filters*args.slices].astype(np.float16) for d in data]\ - + [np.zeros((patches.shape[0],), dtype=np.float16)] + + [np.zeros((patches.shape[0],), dtype=np.float16), -1] print(' - Layer {} as {} patches {} in {:,}kb.'.format(layer, patches.shape[:2], patches.shape[2:], patches.size//1000)) def prepare_content(self, scale=1.0): @@ -392,7 +394,7 @@ def prepare_content(self, scale=1.0): feature, *_ = encoder(feature, self.content_map) feature = feature[:,:self.model.channels[layer]] self.content_features.insert(0, feature) - print(' - Layer {} as {} array in {:,}kb.'.format(layer, feature.shape[1:], feature.size//1000)) + print(" - Layer {} as {} array in {:,}kb.".format(layer, feature.shape[1:], feature.size//1000)) def prepare_generation(self): """Layerwise synthesis images requires two sets of Theano functions to be compiled. 
@@ -475,11 +477,9 @@ def evaluate_slices(self, l, f, v): self.normalize_components(l, f, self.compute_norms(np, l, f)) self.matcher_tensors[l].set_value(f) - layer, data = self.model.network['nn%i'%l], self.style_data[l] - history = data[-1] - + layer, data, history = self.model.network['nn%i'%l], self.style_data[l], self.style_data[l][-2] best_idx, best_val = None, 0.0 - for idx, (bp, bi, bs, bh) in self.iterate_batches(*data, batch_size=layer.num_filters): + for idx, (bp, bi, bs, bh) in self.iterate_batches(*data[:-1], batch_size=layer.num_filters): weights = bp.astype(np.float32) self.normalize_components(l, weights, (bi, bs)) layer.W.set_value(weights) @@ -495,27 +495,39 @@ def evaluate_slices(self, l, f, v): return best_idx, best_val - def evaluate_feature(self, layer, feature, variety=0.0, iterations=1): + def evaluate_feature(self, layer, feature, variety=0.0): """Compute best matching patches for this layer, then merge patches into a single feature array of same size. """ iter_time = time.time() - patches = self.style_data[layer][0] - - for _ in range(iterations): - best_idx, best_val = self.evaluate_slices(layer, feature, variety) - better_patches = patches[best_idx,:self.model.channels[layer]].astype(np.float32).transpose((0, 2, 3, 1)) - better_shape = feature.shape[2:] + (feature.shape[1],) - better_feature = reconstruct_from_patches_2d(better_patches, better_shape) - feature = better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] - - used = 99.9 * len(set(best_idx)) / best_idx.shape[0] - dups = 99.9 * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] - err = best_val.mean() - print(' {}layer{} {:>1} {}patches{} used {:2.0f}% dups {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'\ - .format(ansi.BOLD, ansi.ENDC, layer, ansi.BOLD, ansi.ENDC, used, dups, - ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) - - return feature + patches, indices = self.style_data[layer][0], self.style_data[layer][-1] + + 
best_idx, best_val = self.evaluate_slices(layer, feature, variety) + better_patches = patches[best_idx,:self.model.channels[layer]].astype(np.float32).transpose((0, 2, 3, 1)) + better_shape = feature.shape[2:] + (feature.shape[1],) + better_feature = reconstruct_from_patches_2d(better_patches, better_shape) + + used = 99. * len(set(best_idx)) / best_idx.shape[0] + duplicates = 99. * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] + changed = 99. * (1.0 - np.where(indices == best_idx)[0].shape[0] / best_idx.shape[0]) + err = best_val.mean() + print(' {}layer{} {:>1} {}patches{} used {:2.0f}% dups {:2.0f}% chgd {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'\ + .format(ansi.BOLD, ansi.ENDC, layer, ansi.BOLD, ansi.ENDC, used, duplicates, changed, + ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) + + self.style_data[layer][-1] = best_idx + return better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] + + def evaluate_features(self, features): + params = zip(*[extend(a) for a in [args.content_weight, args.noise_weight, args.variety, args.iterations]]) + result = [] + for l, f, c, p in zip(args.layers, features, self.content_features, params): + content_weight, noise_weight, variety, iterations = p + for _ in range(iterations): + feature = f * (1.0 - content_weight) + c * content_weight \ + + np.random.normal(0.0, 1.0, size=f.shape).astype(np.float32) * noise_weight + f = self.evaluate_feature(l, feature, variety) + result.append(f) + return result def evaluate_exchange(self, features): decoded, encoded = features, features @@ -528,27 +540,17 @@ def evaluate_exchange(self, features): for e in encoded: ready[e.shape].append((e, weights[d.shape])) return [sum([a*w for a, w in ready.get(f.shape, [(f,1.0)])]) / sum([w for _, w in ready.get(f.shape, [(f,1.0)])]) for f in features] - def evaluate_merge(self, features): - params, result = zip(*[extend(a) for a in [args.content_weight, args.noise_weight]]), [] - for f, c, p 
in zip(features, self.content_features, params): - content_weight, noise_weight = p - mixed = f * (1.0 - content_weight) + c * content_weight \ - + np.random.normal(0.0, 1.0, size=f.shape).astype(np.float32) * noise_weight - result.append(mixed) - return result - def evaluate(self, Xn): """Feed-forward evaluation of the output based on current image. Can be called multiple times. """ - frame, extra = 0, [extend(args.variety), extend(args.iterations)] + frame = 0 current_features = [np.copy(f) for f in self.content_features] self.render(frame, args.layers[0], current_features[0]) for j in range(args.passes): frame += 1 - print('\n{}Iteration {}{}: variety {}, weights {}.{}'.format(ansi.CYAN_B, frame, ansi.CYAN, 0.0, 0.0, ansi.ENDC)) - current_features = self.evaluate_merge(current_features) - current_features = [self.evaluate_feature(l, f, *e) for l, f, *e in zip(args.layers, current_features, *extra)] + print('\n{}Pass #{}{}: variety {}, weights {}.{}'.format(ansi.CYAN_B, frame, ansi.CYAN, 0.0, 0.0, ansi.ENDC)) + current_features = self.evaluate_features(current_features) current_features = self.evaluate_exchange(current_features) self.render(frame, args.layers[-1], current_features[-1]) From 288f3acddb67e852de8775dd0c2986bdcce1080c Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Tue, 31 May 2016 08:09:23 +0200 Subject: [PATCH 41/58] Cleaned up default parameters for testing. 
--- doodle.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doodle.py b/doodle.py index 6b1941c..2b4b086 100755 --- a/doodle.py +++ b/doodle.py @@ -38,18 +38,18 @@ add_arg('--passes', default=2, type=int, help='Number of times to go over the whole image.') add_arg('--variety', default=[.2,.1,.0], nargs='+', type=float, help='Bias selecting diverse patches') add_arg('--layers', default=[5, 4, 3], nargs='+', type=int, help='The layers/scales to process.') -add_arg('--layer-weight', default=[1.0], nargs='+', type=float, help='Weight of previous layer features.') -add_arg('--content-weight', default=[0.3], nargs='+', type=float, help='Weight of input content features each layer.') -add_arg('--noise-weight', default=[0.1], nargs='+', type=float, help='Weight of noise added into features.') -add_arg('--iterations', default=[1], nargs='+', type=int, help='Number of times to repeat layer optimization.') +add_arg('--layer-weight', default=[1.0], nargs='+', type=float, help='Weight of previous layer features.') +add_arg('--content-weight', default=[.3,.2,.1], nargs='+', type=float, help='Weight of input content features each layer.') +add_arg('--noise-weight', default=[.2,.1,.0], nargs='+', type=float, help='Weight of noise added into features.') +add_arg('--iterations', default=[4, 4, 1], nargs='+', type=int, help='Number of times to repeat layer optimization.') add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') add_arg('--seed', default=None, type=int, help='Initial state for the random number generator.') add_arg('--semantic-ext', default='_sem.png', type=str, help='File extension for the semantic maps.') -add_arg('--semantic-weight', default=3.0, type=float, help='Global weight of semantics vs. style features.') +add_arg('--semantic-weight', default=0.0, type=float, help='Global weight of semantics vs. 
style features.') add_arg('--output', default='output.png', type=str, help='Filename or path to save output once done.') add_arg('--output-size', default=None, type=str, help='Size of the output image, e.g. 512x512.') add_arg('--frames', default=False, action='store_true', help='Render intermediate frames, takes more time.') -add_arg('--slices', default=2, type=int, help='Split patches up into this number of batches.') +add_arg('--slices', default=16, type=int, help='Split patches up into this number of batches.') add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') args = parser.parse_args() @@ -524,7 +524,7 @@ def evaluate_features(self, features): content_weight, noise_weight, variety, iterations = p for _ in range(iterations): feature = f * (1.0 - content_weight) + c * content_weight \ - + np.random.normal(0.0, 1.0, size=f.shape).astype(np.float32) * noise_weight + + np.random.normal(0.0, 1.0, size=f.shape).astype(np.float32) * (0.1 * noise_weight) f = self.evaluate_feature(l, feature, variety) result.append(f) return result From ec1d4019d32280c456546fdafafdcf90a51518dd Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Tue, 31 May 2016 20:36:28 +0200 Subject: [PATCH 42/58] First prototype of patch-matching. --- doodle.py | 121 ++++++++++++++++++++++-------------------------------- 1 file changed, 50 insertions(+), 71 deletions(-) diff --git a/doodle.py b/doodle.py index 2b4b086..764bbf9 100755 --- a/doodle.py +++ b/doodle.py @@ -182,14 +182,17 @@ def DecdLayer(copy, previous, channels, nonlinearity=lasagne.nonlinearities.elu) net['out'] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) def ConcatenateLayer(incoming, layer): + # TODO: The model is constructed too soon, we don't yet know if semantic_weight is needed. Fails if not. 
return ConcatLayer([incoming, net['map%i'%layer]]) if args.semantic_weight > 0.0 else incoming # Auxiliary network for the semantic layers, and the nearest neighbors calculations. + self.pm_inputs, self.pm_buffers, self.pm_candidates = {}, {}, {} for layer, upper, lower in zip(args.layers, [None] + args.layers[:-1], args.layers[1:] + [None]): self.channels[layer] = net['enc%i_1'%layer].num_filters net['sem%i'%layer] = ConcatenateLayer(net['enc%i_1'%layer], layer) - net['dup%i'%layer] = InputLayer(net['enc%i_1'%layer].output_shape) - net['nn%i'%layer] = ConvLayer(ConcatenateLayer(net['dup%i'%layer], layer), 1, 3, b=None, pad=0, flip_filters=False) + self.pm_inputs[layer] = T.ftensor4() + self.pm_buffers[layer] = T.ftensor4() + self.pm_candidates[layer] = T.itensor4() self.network = net def load_data(self): @@ -335,17 +338,15 @@ def prepare_style(self, scale=1.0): output = lasagne.layers.get_output(self.model.network['sem%i'%layer], {self.model.network['lat'+input]: tensor_latent, self.model.network['map']: self.model.tensor_map}) - fn = self.compile([tensor_latent, self.model.tensor_map], [output] + self.do_extract_patches([layer], [output], [shape])) + fn = self.compile([tensor_latent, self.model.tensor_map], [output] + self.do_extract_patches(layer, output)) self.encoders.append(fn) # Store all the style patches layer by layer, resized to match slice size and cast to 16-bit for size. 
self.style_data, feature = {}, self.style_img for layer, encoder in reversed(list(zip(args.layers, self.encoders))): feature, *data = encoder(feature, self.style_map) - feature = feature[:,:self.model.channels[layer]] - patches, l = data[0], self.model.network['nn%i'%layer] - l.num_filters = patches.shape[0] // args.slices - self.style_data[layer] = [d[:l.num_filters*args.slices].astype(np.float16) for d in data]\ + feature, patches = feature[:,:self.model.channels[layer]], data[0] + self.style_data[layer] = [d.astype(np.float16) for d in data]\ + [np.zeros((patches.shape[0],), dtype=np.float16), -1] print(' - Layer {} as {} patches {} in {:,}kb.'.format(layer, patches.shape[:2], patches.shape[2:], patches.size//1000)) @@ -400,13 +401,7 @@ def prepare_generation(self): """Layerwise synthesis images requires two sets of Theano functions to be compiled. """ # Patch matching calculation that uses only pre-calculated features and a slice of the patches. - self.matcher_tensors = {l: lasagne.utils.shared_empty(dim=4) for l in args.layers} - self.matcher_history = {l: T.vector() for l in args.layers} - self.matcher_inputs = {self.model.network['dup%i'%l]: self.matcher_tensors[l] for l in args.layers} - self.matcher_inputs.update({self.model.network['map']: self.model.tensor_map}) - nn_layers = [self.model.network['nn%i'%l] for l in args.layers] - self.matcher_outputs = dict(zip(args.layers, lasagne.layers.get_output(nn_layers, self.matcher_inputs))) - self.compute_matches = {l: self.compile([self.matcher_history[l], self.model.tensor_map], + self.compute_matches = {l: self.compile([self.model.pm_inputs[l], self.model.pm_buffers[l], self.model.pm_candidates[l]], self.do_match_patches(l)) for l in args.layers} # Decoding intermediate features into more specialized features and all the way to the output image. 
@@ -431,90 +426,74 @@ def prepare_generation(self): # Theano Computation #------------------------------------------------------------------------------------------------------------------ - def do_extract_patches(self, layers, outputs, sizes, stride=1): + def do_extract_patches(self, layer, output, stride=1): """This function builds a Theano expression that will get compiled an run on the GPU. It extracts 3x3 patches from the intermediate outputs in the model. """ - results = [] - for layer, output, size in zip(layers, outputs, sizes): - # Use a Theano helper function to extract "neighbors" of specific size, seems a bit slower than doing - # it manually but much simpler! - patches = theano.tensor.nnet.neighbours.images2neibs(output, (size, size), (stride, stride), mode='valid') - # Make sure the patches are in the shape required to insert them into the model as another layer. - patches = patches.reshape((-1, patches.shape[0] // output.shape[1], size, size)).dimshuffle((1, 0, 2, 3)) - # Calculate the magnitude that we'll use for normalization at runtime, then store... - results.extend([patches] + self.compute_norms(T, layer, patches)) - return results + return [output] + self.compute_norms(T, layer, output) def do_match_patches(self, layer): - # Use node in the model to compute the result of the normalized cross-correlation, using results from the - # nearest-neighbor layers called 'nn3_1' and 'nn4_1'. - dist = self.matcher_outputs[layer] - dist = dist.reshape((dist.shape[1], -1)) - # Compute the score of each patch, taking into account statistics from previous iteration. This equalizes - # the chances of the patches being selected when the user requests more variety. - offset = self.matcher_history[layer].reshape((-1, 1)) - scores = dist - offset - # Pick the best style patches for each patch in the current image, the result is an array of indices. - # Also return the maximum value along both axis, used to compare slices and add patch variety. 
- return [scores.argmax(axis=0), scores.max(axis=0), dist.max(axis=1)] + inputs = self.model.pm_inputs[layer] + buffers = self.model.pm_buffers[layer] + indices = self.model.pm_candidates[layer] + + candidates = buffers[indices[:,:,:,0],:,indices[:,:,:,1],indices[:,:,:,2]].dimshuffle((3,0,1,2)) + reference = inputs[0,:,1:-1,1:-1].dimshuffle((0,1,2,'x')) + scores = T.sum(candidates * reference, axis=(0)) + return [scores.argmax(axis=(2)), scores.max(axis=(2))] # [scores.argmax(axis=0), scores.max(axis=0)] #------------------------------------------------------------------------------------------------------------------ # Optimization Loop #------------------------------------------------------------------------------------------------------------------ - def iterate_batches(self, *arrays, batch_size): - """Break down the data in arrays batch by batch and return them as a generator. - """ - total_size = arrays[0].shape[0] - indices = np.arange(total_size) - for index in range(0, total_size, batch_size): - excerpt = indices[index:index + batch_size] - yield excerpt, [a[excerpt] for a in arrays] - - def evaluate_slices(self, l, f, v): + def evaluate_patches(self, l, f, v): + buffers = self.style_data[l][0].astype(np.float32) + self.normalize_components(l, buffers, self.style_data[l][1:2]) self.normalize_components(l, f, self.compute_norms(np, l, f)) - self.matcher_tensors[l].set_value(f) - - layer, data, history = self.model.network['nn%i'%l], self.style_data[l], self.style_data[l][-2] - best_idx, best_val = None, 0.0 - for idx, (bp, bi, bs, bh) in self.iterate_batches(*data[:-1], batch_size=layer.num_filters): - weights = bp.astype(np.float32) - self.normalize_components(l, weights, (bi, bs)) - layer.W.set_value(weights) - - cur_idx, cur_val, cur_match = self.compute_matches[l](history[idx], self.content_map) - if best_idx is None: - best_idx, best_val = cur_idx, cur_val - else: - i = np.where(cur_val > best_val) - best_idx[i] = idx[cur_idx[i]] - best_val[i] = 
cur_val[i] - history[idx] = cur_match * v - return best_idx, best_val + SAMPLES = 64 + indices = np.zeros((f.shape[2]-2, f.shape[3]-2, SAMPLES, 3), dtype=np.int32) # TODO: patchsize + indices[:,:,:,1] = np.random.randint(low=1, high=buffers.shape[2]-1, size=indices.shape[:3]) # TODO: patchsize + indices[:,:,:,2] = np.random.randint(low=1, high=buffers.shape[3]-1, size=indices.shape[:3]) # TODO: patchsize + + identity = np.indices(buffers.shape[2:]).transpose((1,2,0))[:-2,:-2] + 1 # TODO: patchsize + indices[:,:,17,1:] = identity + + best_idx, best_val = self.compute_matches[l](f, buffers, indices) + # Numpy array indexing rules seem to require injecting the identity matrix back into the array. + accessors = np.concatenate([identity - 1, best_idx[:,:,np.newaxis]], axis=2) + ref_idx = indices[accessors[:,:,0],accessors[:,:,1],accessors[:,:,2]] + return ref_idx, best_val def evaluate_feature(self, layer, feature, variety=0.0): """Compute best matching patches for this layer, then merge patches into a single feature array of same size. 
""" iter_time = time.time() - patches, indices = self.style_data[layer][0], self.style_data[layer][-1] + B, indices = self.style_data[layer][0][:,:,:,:,np.newaxis,np.newaxis].astype(np.float32), self.style_data[layer][-1] + best_idx, best_val = self.evaluate_patches(layer, feature, variety) + # better_patches = buffers[best_idx[:,:,0],:,best_idx[:,:,1],best_idx[:,:,2]] + i0, i1, i2 = best_idx[:,:,0], best_idx[:,:,1], best_idx[:,:,2] - best_idx, best_val = self.evaluate_slices(layer, feature, variety) - better_patches = patches[best_idx,:self.model.channels[layer]].astype(np.float32).transpose((0, 2, 3, 1)) + better_patches = np.concatenate([np.concatenate([B[i0,:,i1-1,i2-1], B[i0,:,i1-1,i2+0], B[i0,:,i1-1,i2+1]], axis=4), + np.concatenate([B[i0,:,i1+0,i2-1], B[i0,:,i1+0,i2+0], B[i0,:,i1+0,i2+1]], axis=4), + np.concatenate([B[i0,:,i1+1,i2-1], B[i0,:,i1+1,i2+0], B[i0,:,i1+1,i2+1]], axis=4)], axis=3) + + better_patches = better_patches.reshape((-1,)+better_patches.shape[2:]).transpose((0,2,3,1)) better_shape = feature.shape[2:] + (feature.shape[1],) better_feature = reconstruct_from_patches_2d(better_patches, better_shape) - used = 99. * len(set(best_idx)) / best_idx.shape[0] - duplicates = 99. * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] - changed = 99. * (1.0 - np.where(indices == best_idx)[0].shape[0] / best_idx.shape[0]) + # used = 99. * len(set(best_idx)) / best_idx.shape[0] + # duplicates = 99. * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] + # changed = 99. 
* (1.0 - np.where(indices == best_idx)[0].shape[0] / best_idx.shape[0]) + used, duplicates, changed = -1.0, -2.0, -3.0 + err = best_val.mean() print(' {}layer{} {:>1} {}patches{} used {:2.0f}% dups {:2.0f}% chgd {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'\ .format(ansi.BOLD, ansi.ENDC, layer, ansi.BOLD, ansi.ENDC, used, duplicates, changed, ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) - self.style_data[layer][-1] = best_idx + # self.style_data[layer][-1] = best_idx return better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] def evaluate_features(self, features): From 1c08f626afa6afb9dfb00b825ca73a3e8925f938 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Wed, 1 Jun 2016 10:07:51 +0200 Subject: [PATCH 43/58] Improving patch-matching performance. --- doodle.py | 43 +++++++++++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/doodle.py b/doodle.py index 764bbf9..5eff5fa 100755 --- a/doodle.py +++ b/doodle.py @@ -379,7 +379,7 @@ def prepare_content(self, scale=1.0): if content_img_original is None: print(" - No content image found; seed was set to random noise.") - content_img_original = np.random.uniform(0, 64, content_map_original.shape[:2]+(3,)).astype(np.float32) + content_img_original = np.random.uniform(0, 256, content_map_original.shape[:2]+(3,)).astype(np.float32) if content_map_original.shape[2] != self.style_map.shape[1]: error("Mismatch in number of channels for style and content semantic map.", @@ -403,6 +403,7 @@ def prepare_generation(self): # Patch matching calculation that uses only pre-calculated features and a slice of the patches. self.compute_matches = {l: self.compile([self.model.pm_inputs[l], self.model.pm_buffers[l], self.model.pm_candidates[l]], self.do_match_patches(l)) for l in args.layers} + self.pm_previous = {} # Decoding intermediate features into more specialized features and all the way to the output image. 
self.encoders, input_tensors = [], self.model.tensor_latent[1:] + [('0', self.model.tensor_img)] @@ -452,18 +453,40 @@ def evaluate_patches(self, l, f, v): self.normalize_components(l, buffers, self.style_data[l][1:2]) self.normalize_components(l, f, self.compute_norms(np, l, f)) - SAMPLES = 64 + SAMPLES = 128 indices = np.zeros((f.shape[2]-2, f.shape[3]-2, SAMPLES, 3), dtype=np.int32) # TODO: patchsize - indices[:,:,:,1] = np.random.randint(low=1, high=buffers.shape[2]-1, size=indices.shape[:3]) # TODO: patchsize - indices[:,:,:,2] = np.random.randint(low=1, high=buffers.shape[3]-1, size=indices.shape[:3]) # TODO: patchsize - identity = np.indices(buffers.shape[2:]).transpose((1,2,0))[:-2,:-2] + 1 # TODO: patchsize - indices[:,:,17,1:] = identity + ref_idx = self.pm_previous.get(l, None) + for _ in range(4): + indices[:,:,:,1] = np.random.randint(low=1, high=buffers.shape[2]-1, size=indices.shape[:3]) # TODO: patchsize + indices[:,:,:,2] = np.random.randint(low=1, high=buffers.shape[3]-1, size=indices.shape[:3]) # TODO: patchsize - best_idx, best_val = self.compute_matches[l](f, buffers, indices) - # Numpy array indexing rules seem to require injecting the identity matrix back into the array. 
- accessors = np.concatenate([identity - 1, best_idx[:,:,np.newaxis]], axis=2) - ref_idx = indices[accessors[:,:,0],accessors[:,:,1],accessors[:,:,2]] + if ref_idx is not None: + indices[:,:,0,:] = ref_idx + (0,0,0) + indices[:,:,1,:] = ref_idx + (0,0,+1) + indices[:,:,2,:] = ref_idx + (0,+1,0) + indices[:,:,3,:] = ref_idx + (0,0,-1) + indices[:,:,4,:] = ref_idx + (0,-1,0) + + indices[:-1,:,5,:] = ref_idx[+1:,:] + (0,-1,0) + indices[+1:,:,6,:] = ref_idx[:-1,:] + (0,+1,0) + indices[:,:-1,7,:] = ref_idx[:,+1:] + (0,0,-1) + indices[:,+1:,8,:] = ref_idx[:,:-1] + (0,0,+1) + + indices[:,:,:9,1].clip(1, buffers.shape[2]-2, out=indices[:,:,:9,1]) + indices[:,:,:9,2].clip(1, buffers.shape[3]-2, out=indices[:,:,:9,2]) + + best_idx, best_val = self.compute_matches[l](f, buffers, indices) + + # Numpy array indexing rules seem to require injecting the identity matrix back into the array. + identity = np.indices(f.shape[2:]).transpose((1,2,0))[:-2,:-2] + 1 # TODO: patchsize + accessors = np.concatenate([identity - 1, best_idx[:,:,np.newaxis]], axis=2) + ref_idx = indices[accessors[:,:,0],accessors[:,:,1],accessors[:,:,2]] + + best_val *= 9.0 + print('values', ref_idx.shape, best_val.min(), best_val.mean(), best_val.max()) + + self.pm_previous[l] = ref_idx return ref_idx, best_val def evaluate_feature(self, layer, feature, variety=0.0): From c4e6ceb803673d75e491691e461bf28331a9e9b8 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Fri, 3 Jun 2016 10:25:01 +0200 Subject: [PATCH 44/58] Slow version of 3x3 patch-matching. --- doodle.py | 51 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 36 insertions(+), 15 deletions(-) diff --git a/doodle.py b/doodle.py index 5eff5fa..50dcc93 100755 --- a/doodle.py +++ b/doodle.py @@ -49,7 +49,6 @@ add_arg('--output', default='output.png', type=str, help='Filename or path to save output once done.') add_arg('--output-size', default=None, type=str, help='Size of the output image, e.g. 
512x512.') add_arg('--frames', default=False, action='store_true', help='Render intermediate frames, takes more time.') -add_arg('--slices', default=16, type=int, help='Split patches up into this number of batches.') add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') args = parser.parse_args() @@ -295,10 +294,10 @@ def load_images(self, name, filename, scale=1.0): .format(filename, map.shape[1::-1], mapname, img.shape[1::-1])) return [(self.rescale_image(i, scale) if i is not None else None) for i in [img, map]] - def compile(self, arguments, function): + def compile(self, arguments, function, **opts): """Build a Theano function that will run the specified expression on the GPU. """ - return theano.function(list(arguments), function, on_unused_input='ignore', allow_input_downcast=True) + return theano.function(list(arguments), function, on_unused_input='ignore', allow_input_downcast=True, **opts) def compute_norms(self, backend, layer, array): ni = backend.sqrt(backend.sum(array[:,:self.model.channels[layer]] ** 2.0, axis=(1,), keepdims=True)) @@ -427,21 +426,30 @@ def prepare_generation(self): # Theano Computation #------------------------------------------------------------------------------------------------------------------ - def do_extract_patches(self, layer, output, stride=1): + def do_extract_patches(self, layer, output): """This function builds a Theano expression that will get compiled an run on the GPU. It extracts 3x3 patches from the intermediate outputs in the model. 
""" return [output] + self.compute_norms(T, layer, output) - def do_match_patches(self, layer): + def do_match_patches(self, layer, size=3, stride=1): inputs = self.model.pm_inputs[layer] buffers = self.model.pm_buffers[layer] indices = self.model.pm_candidates[layer] - candidates = buffers[indices[:,:,:,0],:,indices[:,:,:,1],indices[:,:,:,2]].dimshuffle((3,0,1,2)) - reference = inputs[0,:,1:-1,1:-1].dimshuffle((0,1,2,'x')) - scores = T.sum(candidates * reference, axis=(0)) - return [scores.argmax(axis=(2)), scores.max(axis=(2))] # [scores.argmax(axis=0), scores.max(axis=0)] + patches = theano.tensor.nnet.neighbours.images2neibs(inputs, (size, size), (stride, stride), mode='valid') + patches = patches.reshape((-1, patches.shape[0] // inputs.shape[1], size, size)).dimshuffle((1, 0, 2, 3)) + patches = patches.reshape((inputs.shape[2]-2, inputs.shape[3]-2, patches.shape[1], patches.shape[2], patches.shape[3])) + + B = buffers.reshape((buffers.shape[0], buffers.shape[1], buffers.shape[2], buffers.shape[3], 1, 1)) + i0, i1, i2 = indices[:,:,:,0], indices[:,:,:,1], indices[:,:,:,2] + candidates = T.concatenate([T.concatenate([B[i0,:,i1-1,i2-1], B[i0,:,i1-1,i2+0], B[i0,:,i1-1,i2+1]], axis=5), + T.concatenate([B[i0,:,i1+0,i2-1], B[i0,:,i1+0,i2+0], B[i0,:,i1+0,i2+1]], axis=5), + T.concatenate([B[i0,:,i1+1,i2-1], B[i0,:,i1+1,i2+0], B[i0,:,i1+1,i2+1]], axis=5)], axis=4) + + reference = patches.dimshuffle((0,1,'x',2,3,4)) + scores = T.sum(candidates * reference, axis=(3,4,5)) + return [scores.argmax(axis=(2)), scores.max(axis=(2))] #------------------------------------------------------------------------------------------------------------------ @@ -453,11 +461,11 @@ def evaluate_patches(self, l, f, v): self.normalize_components(l, buffers, self.style_data[l][1:2]) self.normalize_components(l, f, self.compute_norms(np, l, f)) - SAMPLES = 128 + SAMPLES = 24 indices = np.zeros((f.shape[2]-2, f.shape[3]-2, SAMPLES, 3), dtype=np.int32) # TODO: patchsize ref_idx = 
self.pm_previous.get(l, None) - for _ in range(4): + for i in range(2 if l > 3 else 1): indices[:,:,:,1] = np.random.randint(low=1, high=buffers.shape[2]-1, size=indices.shape[:3]) # TODO: patchsize indices[:,:,:,2] = np.random.randint(low=1, high=buffers.shape[3]-1, size=indices.shape[:3]) # TODO: patchsize @@ -475,16 +483,29 @@ def evaluate_patches(self, l, f, v): indices[:,:,:9,1].clip(1, buffers.shape[2]-2, out=indices[:,:,:9,1]) indices[:,:,:9,2].clip(1, buffers.shape[3]-2, out=indices[:,:,:9,2]) - - best_idx, best_val = self.compute_matches[l](f, buffers, indices) + + prev_idx = self.pm_previous.get(l+1, None) + if i == 0 and prev_idx is not None: + resh_idx = np.concatenate([scipy.ndimage.zoom(np.pad(prev_idx[:,:,i]*2, 1, mode='reflect'), 2, order=1)[:,:,np.newaxis] for i in range(1, 3)], axis=(2)) + indices[:,:,10,1:] = resh_idx[+1:-1,+1:-1] + indices[:,:,11,1:] = resh_idx[+2: ,+2: ] + indices[:,:,12,1:] = resh_idx[+2: , :-2] + indices[:,:,13,1:] = resh_idx[ :-2,+2: ] + indices[:,:,14,1:] = resh_idx[ :-2, :-2] + + indices[:,:,10:15,1].clip(1, buffers.shape[2]-2, out=indices[:,:,10:15,1]) + indices[:,:,10:15,2].clip(1, buffers.shape[3]-2, out=indices[:,:,10:15,2]) + + # t = time.time() + best_idx, best_val = self.compute_matches[l](f, buffers, indices) + # print('delta', time.time() - t) # Numpy array indexing rules seem to require injecting the identity matrix back into the array. identity = np.indices(f.shape[2:]).transpose((1,2,0))[:-2,:-2] + 1 # TODO: patchsize accessors = np.concatenate([identity - 1, best_idx[:,:,np.newaxis]], axis=2) ref_idx = indices[accessors[:,:,0],accessors[:,:,1],accessors[:,:,2]] - best_val *= 9.0 - print('values', ref_idx.shape, best_val.min(), best_val.mean(), best_val.max()) + print('values', ref_idx.shape, np.percentile(best_val, 5.0), best_val.mean(), np.percentile(best_val, 95.0)) self.pm_previous[l] = ref_idx return ref_idx, best_val From 99ec0a7822c5969c548d086e929c44a3124e5330 Mon Sep 17 00:00:00 2001 From: "Alex J. 
Champandard" Date: Fri, 3 Jun 2016 14:33:21 +0200 Subject: [PATCH 45/58] Minor performance improvements, major memory improvements. Sub-tensor indexing seems to use a Python function on CPU, possibly offloaded to GPU with CUDA though. --- doodle.py | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/doodle.py b/doodle.py index 50dcc93..55996ba 100755 --- a/doodle.py +++ b/doodle.py @@ -306,6 +306,7 @@ def compute_norms(self, backend, layer, array): def normalize_components(self, layer, array, norms): if args.semantic_weight > 0.0: + print(layer, self.model.channels, len(norms)) array[:,self.model.channels[layer]:] /= (norms[1] * args.semantic_weight) array[:,:self.model.channels[layer]] /= (norms[0] * 3.0) @@ -437,18 +438,13 @@ def do_match_patches(self, layer, size=3, stride=1): buffers = self.model.pm_buffers[layer] indices = self.model.pm_candidates[layer] - patches = theano.tensor.nnet.neighbours.images2neibs(inputs, (size, size), (stride, stride), mode='valid') - patches = patches.reshape((-1, patches.shape[0] // inputs.shape[1], size, size)).dimshuffle((1, 0, 2, 3)) - patches = patches.reshape((inputs.shape[2]-2, inputs.shape[3]-2, patches.shape[1], patches.shape[2], patches.shape[3])) - - B = buffers.reshape((buffers.shape[0], buffers.shape[1], buffers.shape[2], buffers.shape[3], 1, 1)) + scores = 0.0 i0, i1, i2 = indices[:,:,:,0], indices[:,:,:,1], indices[:,:,:,2] - candidates = T.concatenate([T.concatenate([B[i0,:,i1-1,i2-1], B[i0,:,i1-1,i2+0], B[i0,:,i1-1,i2+1]], axis=5), - T.concatenate([B[i0,:,i1+0,i2-1], B[i0,:,i1+0,i2+0], B[i0,:,i1+0,i2+1]], axis=5), - T.concatenate([B[i0,:,i1+1,i2-1], B[i0,:,i1+1,i2+0], B[i0,:,i1+1,i2+1]], axis=5)], axis=4) - - reference = patches.dimshuffle((0,1,'x',2,3,4)) - scores = T.sum(candidates * reference, axis=(3,4,5)) + h, w = inputs.shape[2]-1, inputs.shape[3]-1 + for y, x in itertools.product([-1, 0, +1], repeat=2): + candidates = 
buffers[i0,:,i1+y,i2+x].dimshuffle((3,0,1,2)) + reference = inputs[0,:,1+y:h+y,1+x:w+x].dimshuffle((0,1,2,'x')) + scores += T.sum(candidates * reference, axis=(0)) return [scores.argmax(axis=(2)), scores.max(axis=(2))] @@ -458,10 +454,10 @@ def do_match_patches(self, layer, size=3, stride=1): def evaluate_patches(self, l, f, v): buffers = self.style_data[l][0].astype(np.float32) - self.normalize_components(l, buffers, self.style_data[l][1:2]) + self.normalize_components(l, buffers, self.style_data[l][1:3]) self.normalize_components(l, f, self.compute_norms(np, l, f)) - SAMPLES = 24 + SAMPLES = 20 indices = np.zeros((f.shape[2]-2, f.shape[3]-2, SAMPLES, 3), dtype=np.int32) # TODO: patchsize ref_idx = self.pm_previous.get(l, None) @@ -496,9 +492,10 @@ def evaluate_patches(self, l, f, v): indices[:,:,10:15,1].clip(1, buffers.shape[2]-2, out=indices[:,:,10:15,1]) indices[:,:,10:15,2].clip(1, buffers.shape[3]-2, out=indices[:,:,10:15,2]) - # t = time.time() - best_idx, best_val = self.compute_matches[l](f, buffers, indices) - # print('delta', time.time() - t) + t = time.time() + print('compute_matches', f.shape, buffers.shape) + best_idx, best_val = self.compute_matches[l](f, buffers, indices) + print('patch_match', time.time() - t) # Numpy array indexing rules seem to require injecting the identity matrix back into the array. identity = np.indices(f.shape[2:]).transpose((1,2,0))[:-2,:-2] + 1 # TODO: patchsize From f07a18801ff9c72d0d0155003529e964cf56a6ab Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Thu, 9 Jun 2016 22:37:25 +0200 Subject: [PATCH 46/58] Increased iteration count for PatchMatch, new network size with potentially better training. 
--- doodle.py | 63 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 33 insertions(+), 30 deletions(-) diff --git a/doodle.py b/doodle.py index 55996ba..44b5af9 100755 --- a/doodle.py +++ b/doodle.py @@ -124,7 +124,7 @@ def setup_model(self, previous=None): and then adding augmentations for Semantic Style Transfer. """ net, self.channels = {}, {} - self.units = {1: 32, 2: 56, 3: 88, 4: 136, 5: 224, 6: 360} + self.units = {1: 48, 2: 80, 3: 128, 4: 208, 5: 328, 6: 360} net['map'] = InputLayer((1, None, None, None)) for j in range(6): @@ -142,19 +142,19 @@ def EncdLayer(previous, channels, filter_size, pad, stride=(1,1), nonlinearity=l # Encoder part of the neural network, takes an input image and turns it into abstract patterns. net['img'] = previous or InputLayer((None, 3, None, None)) net['enc0_0'], net['lat0'] = net['img'], net['img'] - net['enc1_1'] = EncdLayer('0_0', 32, 3, pad=1) - net['enc1_2'] = EncdLayer('1_1', 32, 3, pad=1) - net['enc2_1'] = EncdLayer('1_2', 56, 2, pad=0, stride=(2,2)) - net['enc2_2'] = EncdLayer('2_1', 56, 3, pad=1) - net['enc3_1'] = EncdLayer('2_2', 88, 2, pad=0, stride=(2,2)) - net['enc3_2'] = EncdLayer('3_1', 88, 3, pad=1) - net['enc3_3'] = EncdLayer('3_2', 88, 3, pad=1) - net['enc4_1'] = EncdLayer('3_3', 136, 2, pad=0, stride=(2,2)) - net['enc4_2'] = EncdLayer('4_1', 136, 3, pad=1) - net['enc4_3'] = EncdLayer('4_2', 136, 3, pad=1) - net['enc5_1'] = EncdLayer('4_3', 224, 2, pad=0, stride=(2,2)) - net['enc5_2'] = EncdLayer('5_1', 224, 3, pad=1) - net['enc5_3'] = EncdLayer('5_2', 224, 3, pad=1) + net['enc1_1'] = EncdLayer('0_0', 48, 3, pad=1) + net['enc1_2'] = EncdLayer('1_1', 48, 3, pad=1) + net['enc2_1'] = EncdLayer('1_2', 80, 2, pad=0, stride=(2,2)) + net['enc2_2'] = EncdLayer('2_1', 80, 3, pad=1) + net['enc3_1'] = EncdLayer('2_2', 128, 2, pad=0, stride=(2,2)) + net['enc3_2'] = EncdLayer('3_1', 128, 3, pad=1) + net['enc3_3'] = EncdLayer('3_2', 128, 3, pad=1) + net['enc4_1'] = EncdLayer('3_3', 208, 2, pad=0, 
stride=(2,2)) + net['enc4_2'] = EncdLayer('4_1', 208, 3, pad=1) + net['enc4_3'] = EncdLayer('4_2', 208, 3, pad=1) + net['enc5_1'] = EncdLayer('4_3', 328, 2, pad=0, stride=(2,2)) + net['enc5_2'] = EncdLayer('5_1', 328, 3, pad=1) + net['enc5_3'] = EncdLayer('5_2', 328, 3, pad=1) net['enc6_1'] = EncdLayer('5_3', 360, 2, pad=0, stride=(2,2)) def DecdLayer(copy, previous, channels, nonlinearity=lasagne.nonlinearities.elu): @@ -163,19 +163,19 @@ def DecdLayer(copy, previous, channels, nonlinearity=lasagne.nonlinearities.elu) return DeconvLayer(incoming, channels, dup.filter_size, stride=dup.stride, crop=dup.pad, nonlinearity=nonlinearity) # Decoder part of the neural network, takes abstract patterns and converts them into an image! - net['dec5_3'] = DecdLayer('6_1', 'enc6_1', 224) - net['dec5_2'] = DecdLayer('5_3', 'dec5_3', 224) - net['dec5_1'] = DecdLayer('5_2', 'dec5_2', 224) - net['dec4_3'] = DecdLayer('5_1', 'dec5_1', 136) - net['dec4_2'] = DecdLayer('4_3', 'dec4_3', 136) - net['dec4_1'] = DecdLayer('4_2', 'dec4_2', 136) - net['dec3_3'] = DecdLayer('4_1', 'dec4_1', 88) - net['dec3_2'] = DecdLayer('3_3', 'dec3_3', 88) - net['dec3_1'] = DecdLayer('3_2', 'dec3_2', 88) - net['dec2_2'] = DecdLayer('3_1', 'dec3_1', 56) - net['dec2_1'] = DecdLayer('2_2', 'dec2_2', 56) - net['dec1_2'] = DecdLayer('2_1', 'dec2_1', 32) - net['dec1_1'] = DecdLayer('1_2', 'dec1_2', 32) + net['dec5_3'] = DecdLayer('6_1', 'enc6_1', 328) + net['dec5_2'] = DecdLayer('5_3', 'dec5_3', 328) + net['dec5_1'] = DecdLayer('5_2', 'dec5_2', 328) + net['dec4_3'] = DecdLayer('5_1', 'dec5_1', 208) + net['dec4_2'] = DecdLayer('4_3', 'dec4_3', 208) + net['dec4_1'] = DecdLayer('4_2', 'dec4_2', 208) + net['dec3_3'] = DecdLayer('4_1', 'dec4_1', 128) + net['dec3_2'] = DecdLayer('3_3', 'dec3_3', 128) + net['dec3_1'] = DecdLayer('3_2', 'dec3_2', 128) + net['dec2_2'] = DecdLayer('3_1', 'dec3_1', 80) + net['dec2_1'] = DecdLayer('2_2', 'dec2_2', 80) + net['dec1_2'] = DecdLayer('2_1', 'dec2_1', 48) + net['dec1_1'] = 
DecdLayer('1_2', 'dec1_2', 48) net['dec0_1'] = DecdLayer('1_1', 'dec1_1', 3, nonlinearity=lasagne.nonlinearities.tanh) net['dec0_0'] = lasagne.layers.ScaleLayer(net['dec0_1'], shared_axes=(0,1,2,3)) net['out'] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) @@ -457,11 +457,11 @@ def evaluate_patches(self, l, f, v): self.normalize_components(l, buffers, self.style_data[l][1:3]) self.normalize_components(l, f, self.compute_norms(np, l, f)) - SAMPLES = 20 + SAMPLES = 32 indices = np.zeros((f.shape[2]-2, f.shape[3]-2, SAMPLES, 3), dtype=np.int32) # TODO: patchsize ref_idx = self.pm_previous.get(l, None) - for i in range(2 if l > 3 else 1): + for i in range(16 if l > 3 else 8): indices[:,:,:,1] = np.random.randint(low=1, high=buffers.shape[2]-1, size=indices.shape[:3]) # TODO: patchsize indices[:,:,:,2] = np.random.randint(low=1, high=buffers.shape[3]-1, size=indices.shape[:3]) # TODO: patchsize @@ -540,12 +540,15 @@ def evaluate_feature(self, layer, feature, variety=0.0): def evaluate_features(self, features): params = zip(*[extend(a) for a in [args.content_weight, args.noise_weight, args.variety, args.iterations]]) result = [] - for l, f, c, p in zip(args.layers, features, self.content_features, params): + for i, (l, c, p) in enumerate(zip(args.layers, self.content_features, params)): + f = features[i] content_weight, noise_weight, variety, iterations = p for _ in range(iterations): feature = f * (1.0 - content_weight) + c * content_weight \ + np.random.normal(0.0, 1.0, size=f.shape).astype(np.float32) * (0.1 * noise_weight) f = self.evaluate_feature(l, feature, variety) + if i+1 < len(features): + features[i+1] = self.decoders[i](f, self.content_map) result.append(f) return result From 3a0418efbb201b9b2300447e542b8d214b79d531 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Fri, 10 Jun 2016 01:33:33 +0200 Subject: [PATCH 47/58] Cleaning up patch-matching, stops at 10% improve threshold. 
--- doodle.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/doodle.py b/doodle.py index 44b5af9..e169e9f 100755 --- a/doodle.py +++ b/doodle.py @@ -457,11 +457,11 @@ def evaluate_patches(self, l, f, v): self.normalize_components(l, buffers, self.style_data[l][1:3]) self.normalize_components(l, f, self.compute_norms(np, l, f)) - SAMPLES = 32 + SAMPLES = 24 indices = np.zeros((f.shape[2]-2, f.shape[3]-2, SAMPLES, 3), dtype=np.int32) # TODO: patchsize - ref_idx = self.pm_previous.get(l, None) - for i in range(16 if l > 3 else 8): + ref_idx, ref_val = self.pm_previous.get(l, (None, 0.0)) + for i in itertools.count(): indices[:,:,:,1] = np.random.randint(low=1, high=buffers.shape[2]-1, size=indices.shape[:3]) # TODO: patchsize indices[:,:,:,2] = np.random.randint(low=1, high=buffers.shape[3]-1, size=indices.shape[:3]) # TODO: patchsize @@ -480,8 +480,9 @@ def evaluate_patches(self, l, f, v): indices[:,:,:9,1].clip(1, buffers.shape[2]-2, out=indices[:,:,:9,1]) indices[:,:,:9,2].clip(1, buffers.shape[3]-2, out=indices[:,:,:9,2]) - prev_idx = self.pm_previous.get(l+1, None) - if i == 0 and prev_idx is not None: + previous = self.pm_previous.get(l+1, None) + if i == 0 and previous is not None: + prev_idx, ref_val = previous resh_idx = np.concatenate([scipy.ndimage.zoom(np.pad(prev_idx[:,:,i]*2, 1, mode='reflect'), 2, order=1)[:,:,np.newaxis] for i in range(1, 3)], axis=(2)) indices[:,:,10,1:] = resh_idx[+1:-1,+1:-1] indices[:,:,11,1:] = resh_idx[+2: ,+2: ] @@ -493,7 +494,6 @@ def evaluate_patches(self, l, f, v): indices[:,:,10:15,2].clip(1, buffers.shape[3]-2, out=indices[:,:,10:15,2]) t = time.time() - print('compute_matches', f.shape, buffers.shape) best_idx, best_val = self.compute_matches[l](f, buffers, indices) print('patch_match', time.time() - t) @@ -502,9 +502,13 @@ def evaluate_patches(self, l, f, v): accessors = np.concatenate([identity - 1, best_idx[:,:,np.newaxis]], axis=2) ref_idx = 
indices[accessors[:,:,0],accessors[:,:,1],accessors[:,:,2]] - print('values', ref_idx.shape, np.percentile(best_val, 5.0), best_val.mean(), np.percentile(best_val, 95.0)) + rv = np.percentile(best_val, 5.0) + print('values', ref_idx.shape, (rv - ref_val) / rv) + if (rv - ref_val) / rv < 0.1: + break + ref_val = rv - self.pm_previous[l] = ref_idx + self.pm_previous[l] = (ref_idx, ref_val) return ref_idx, best_val def evaluate_feature(self, layer, feature, variety=0.0): From 21da45f9ccc37f39b1c0f341d0dcd6b09e7d59b0 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sun, 12 Jun 2016 18:33:53 +0200 Subject: [PATCH 48/58] Working layerwise generation with exchanges between layers as soon as possible, making sure information propagates down immediately as in feed-forward model. --- doodle.py | 72 +++++++++++++++++++++++++++---------------------------- 1 file changed, 35 insertions(+), 37 deletions(-) diff --git a/doodle.py b/doodle.py index e169e9f..5ca479e 100755 --- a/doodle.py +++ b/doodle.py @@ -124,7 +124,7 @@ def setup_model(self, previous=None): and then adding augmentations for Semantic Style Transfer. """ net, self.channels = {}, {} - self.units = {1: 48, 2: 80, 3: 128, 4: 208, 5: 328, 6: 360} + self.units = {1: 48, 2: 80, 3: 128, 4: 208, 5: 328, 6: 536} net['map'] = InputLayer((1, None, None, None)) for j in range(6): @@ -155,7 +155,7 @@ def EncdLayer(previous, channels, filter_size, pad, stride=(1,1), nonlinearity=l net['enc5_1'] = EncdLayer('4_3', 328, 2, pad=0, stride=(2,2)) net['enc5_2'] = EncdLayer('5_1', 328, 3, pad=1) net['enc5_3'] = EncdLayer('5_2', 328, 3, pad=1) - net['enc6_1'] = EncdLayer('5_3', 360, 2, pad=0, stride=(2,2)) + net['enc6_1'] = EncdLayer('5_3', 536, 2, pad=0, stride=(2,2)) def DecdLayer(copy, previous, channels, nonlinearity=lasagne.nonlinearities.elu): # Dynamically injects intermediate "pitstop" output layers in the decoder based on what the user specified as layers. 
@@ -404,6 +404,9 @@ def prepare_generation(self): self.compute_matches = {l: self.compile([self.model.pm_inputs[l], self.model.pm_buffers[l], self.model.pm_candidates[l]], self.do_match_patches(l)) for l in args.layers} self.pm_previous = {} + LayerInput = collections.namedtuple('LayerInput', ['array', 'weight']) + self.layer_inputs = [[LayerInput(self.content_features[i], w) for _, w in zip(args.layers, extend(args.layer_weight))] + for i, _ in enumerate(args.layers)] # Decoding intermediate features into more specialized features and all the way to the output image. self.encoders, input_tensors = [], self.model.tensor_latent[1:] + [('0', self.model.tensor_img)] @@ -503,8 +506,8 @@ def evaluate_patches(self, l, f, v): ref_idx = indices[accessors[:,:,0],accessors[:,:,1],accessors[:,:,2]] rv = np.percentile(best_val, 5.0) - print('values', ref_idx.shape, (rv - ref_val) / rv) - if (rv - ref_val) / rv < 0.1: + print('values', ref_idx.shape, (rv - ref_val) / rv, rv) + if (rv - ref_val) / rv < 0.05: break ref_val = rv @@ -541,49 +544,44 @@ def evaluate_feature(self, layer, feature, variety=0.0): # self.style_data[layer][-1] = best_idx return better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] - def evaluate_features(self, features): + def evaluate_features(self): params = zip(*[extend(a) for a in [args.content_weight, args.noise_weight, args.variety, args.iterations]]) - result = [] + for i, (l, c, p) in enumerate(zip(args.layers, self.content_features, params)): - f = features[i] content_weight, noise_weight, variety, iterations = p - for _ in range(iterations): - feature = f * (1.0 - content_weight) + c * content_weight \ - + np.random.normal(0.0, 1.0, size=f.shape).astype(np.float32) * (0.1 * noise_weight) - f = self.evaluate_feature(l, feature, variety) - if i+1 < len(features): - features[i+1] = self.decoders[i](f, self.content_map) - result.append(f) - return result - - def evaluate_exchange(self, features): - decoded, encoded = features, features - 
weights = {f.shape: w for f, w in zip(features, extend(args.layer_weight))} - ready = {f.shape: [(f, weights[f.shape])] for f in features} - for i in range(len(features)-1): - decoded = [decode(data, self.content_map) for decode, data in zip(self.decoders[+i:len(self.decoders)], decoded[:-1])] - encoded = [encode(data, self.content_map) for encode, data in zip(self.encoders[:len(self.encoders)-i], encoded[+1:])] - for d in decoded: ready[d.shape].append((d, weights[d.shape])) - for e in encoded: ready[e.shape].append((e, weights[d.shape])) - return [sum([a*w for a, w in ready.get(f.shape, [(f,1.0)])]) / sum([w for _, w in ready.get(f.shape, [(f,1.0)])]) for f in features] + for j in range(iterations): + blended = sum([a*w for a, w in self.layer_inputs[i]]) / sum([w for _, w in self.layer_inputs[i]]) + self.render(blended, l, 'blended-L{}I{}'.format(l, j+1)) + feature = blended * (1.0 - content_weight) + c * content_weight \ + + np.random.normal(0.0, 1.0, size=c.shape).astype(np.float32) * (0.1 * noise_weight) + self.render(feature, l, 'mixed-L{}I{}'.format(l, j+1)) + result = self.evaluate_feature(l, feature, variety) + self.render(result, l, 'output-L{}I{}'.format(l, j+1)) + self.layer_inputs[i][i].array[:] = result + + if i+1 < len(args.layers): + for j in range(0, i+1): + self.layer_inputs[i+1][j].array[:] = self.decoders[i](self.layer_inputs[i][j].array, self.content_map) + + for i in range(len(args.layers)-1, 0, -1): + for j in range(i, len(args.layers)): + self.layer_inputs[i-1][j].array[:] = self.encoders[i-1](self.layer_inputs[i][j].array, self.content_map) def evaluate(self, Xn): """Feed-forward evaluation of the output based on current image. Can be called multiple times. 
""" - frame = 0 - current_features = [np.copy(f) for f in self.content_features] - self.render(frame, args.layers[0], current_features[0]) + self.frame = 0 + for i, c in zip(args.layers, self.content_features): + self.render(c, i, 'orig-L{}'.format(i)) for j in range(args.passes): - frame += 1 - print('\n{}Pass #{}{}: variety {}, weights {}.{}'.format(ansi.CYAN_B, frame, ansi.CYAN, 0.0, 0.0, ansi.ENDC)) - current_features = self.evaluate_features(current_features) - current_features = self.evaluate_exchange(current_features) - self.render(frame, args.layers[-1], current_features[-1]) + self.frame += 1 + print('\n{}Pass #{}{}: variety {}, weights {}.{}'.format(ansi.CYAN_B, self.frame, ansi.CYAN, 0.0, 0.0, ansi.ENDC)) + self.evaluate_features() - return self.decoders[-1](current_features[-1], self.content_map) + return self.decoders[-1](self.layer_inputs[-1][-1].array, self.content_map) - def render(self, frame, layer, features): + def render(self, features, layer, suffix): """Decode features at a specific layer and save the result to disk for visualization. (Takes 50% more time.) """ if not args.frames: return @@ -592,7 +590,7 @@ def render(self, frame, layer, features): output = self.model.finalize_image(features.reshape(self.content_img.shape[1:]), self.content_shape) filename = os.path.splitext(os.path.basename(args.output))[0] - scipy.misc.toimage(output, cmin=0, cmax=255).save('frames/{}-{:03d}.png'.format(filename, frame)) + scipy.misc.toimage(output, cmin=0, cmax=255).save('frames/{}-{:03d}-{}.png'.format(filename, self.frame, suffix)) def run(self): """The main entry point for the application, runs through multiple phases at increasing resolutions. From 8ca47875fdf04654f2dcd9be5300cbd45051729f Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Mon, 13 Jun 2016 14:16:53 +0200 Subject: [PATCH 49/58] Switch to JIT-compiled patch-matching. 
--- doodle.py | 57 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 33 insertions(+), 24 deletions(-) diff --git a/doodle.py b/doodle.py index 5ca479e..18d6d0a 100755 --- a/doodle.py +++ b/doodle.py @@ -50,6 +50,7 @@ add_arg('--output-size', default=None, type=str, help='Size of the output image, e.g. 512x512.') add_arg('--frames', default=False, action='store_true', help='Render intermediate frames, takes more time.') add_arg('--device', default='cpu', type=str, help='Index of the GPU number to use, for theano.') +add_arg('--model', default='gelu3', type=str, help='Filename for convolution weights of neural network.') args = parser.parse_args() @@ -88,6 +89,7 @@ def snap(value, grid=2**(args.layers[0]-1)): return int(grid * math.floor(value # Scientific & Imaging Libraries import numpy as np import scipy.optimize, scipy.ndimage, scipy.misc +import numba import PIL.ImageOps from sklearn.feature_extraction.image import reconstruct_from_patches_2d @@ -197,10 +199,10 @@ def ConcatenateLayer(incoming, layer): def load_data(self): """Open the serialized parameters from a pre-trained network, and load them into the model created. """ - data_file = os.path.join(os.path.dirname(__file__), 'gelu3_conv.pkl') + data_file = os.path.join(os.path.dirname(__file__), '{}_conv.pkl'.format(args.model)) if not os.path.exists(data_file): error("Model file with pre-trained convolution layers not found. 
Download from here...", - "https://github.com/alexjc/neural-doodle/releases/download/v0.0/gelu3_conv.pkl") + "https://github.com/alexjc/neural-doodle/releases/download/v0.0/{}_conv.pkl".format(args.model)) data = pickle.load(open(data_file, 'rb')) for layer, values in data.items(): @@ -240,6 +242,23 @@ def finalize_image(self, image, resolution): return scipy.misc.imresize(image, resolution, interp='bicubic') + +@numba.guvectorize([(numba.float32[:,:,:,:], numba.float32[:,:,:,:], numba.int32[:,:,:,:], numba.int32[:,:], numba.float32[:,:])], + '(n,c,x,y),(n,c,z,w),(a,b,m,i)->(a,b),(a,b)', nopython=True, target='parallel') +def compute_matches(inputs, buffers, indices, selected, best): + for a in range(indices.shape[0]): + for b in range(indices.shape[1]): + scores = np.zeros((indices.shape[2],), dtype=np.float32) + for m in range(indices.shape[2]): + i0, i1, i2 = indices[a,b,m,0], indices[a,b,m,1], indices[a,b,m,2] + for y, x in [(-1,-1),(-1,0),(-1,+1),(0,-1),(0,0),(0,+1),(+1,-1),(+1,0),(+1,+1)]: + candidates = buffers[i0,:,i1+y,i2+x] + reference = inputs[0,:,1+a+y,1+b+x] + scores[m] += np.sum(candidates * reference) + selected[a,b] = scores.argmax() + best[a,b] = scores.max() + + #---------------------------------------------------------------------------------------------------------------------- # Semantic Style Transfer #---------------------------------------------------------------------------------------------------------------------- @@ -394,19 +413,23 @@ def prepare_content(self, scale=1.0): for layer, encoder in reversed(list(zip(args.layers, self.encoders))): feature, *_ = encoder(feature, self.content_map) feature = feature[:,:self.model.channels[layer]] - self.content_features.insert(0, feature) + style = self.style_data[layer][0] + + sn, sx = style.min(axis=(0,2,3), keepdims=True), style.max(axis=(0,2,3), keepdims=True) + cn, cx = feature.min(axis=(0,2,3), keepdims=True), feature.max(axis=(0,2,3), keepdims=True) + + self.content_features.insert(0, sn + 
(feature - cn) * (sx - sn) / (cx - cn)) print(" - Layer {} as {} array in {:,}kb.".format(layer, feature.shape[1:], feature.size//1000)) def prepare_generation(self): """Layerwise synthesis images requires two sets of Theano functions to be compiled. """ # Patch matching calculation that uses only pre-calculated features and a slice of the patches. - self.compute_matches = {l: self.compile([self.model.pm_inputs[l], self.model.pm_buffers[l], self.model.pm_candidates[l]], - self.do_match_patches(l)) for l in args.layers} + # self.compute_matches = {l: self.compile([self.model.pm_inputs[l], self.model.pm_buffers[l], self.model.pm_candidates[l]], self.do_match_patches(l)) for l in args.layers} self.pm_previous = {} LayerInput = collections.namedtuple('LayerInput', ['array', 'weight']) - self.layer_inputs = [[LayerInput(self.content_features[i], w) for _, w in zip(args.layers, extend(args.layer_weight))] - for i, _ in enumerate(args.layers)] + self.layer_inputs = [[LayerInput(np.copy(self.content_features[i]), w) for _, w in zip(args.layers, extend(args.layer_weight))] + for i, _ in enumerate(args.layers)] # Decoding intermediate features into more specialized features and all the way to the output image. 
self.encoders, input_tensors = [], self.model.tensor_latent[1:] + [('0', self.model.tensor_img)] @@ -436,20 +459,6 @@ def do_extract_patches(self, layer, output): """ return [output] + self.compute_norms(T, layer, output) - def do_match_patches(self, layer, size=3, stride=1): - inputs = self.model.pm_inputs[layer] - buffers = self.model.pm_buffers[layer] - indices = self.model.pm_candidates[layer] - - scores = 0.0 - i0, i1, i2 = indices[:,:,:,0], indices[:,:,:,1], indices[:,:,:,2] - h, w = inputs.shape[2]-1, inputs.shape[3]-1 - for y, x in itertools.product([-1, 0, +1], repeat=2): - candidates = buffers[i0,:,i1+y,i2+x].dimshuffle((3,0,1,2)) - reference = inputs[0,:,1+y:h+y,1+x:w+x].dimshuffle((0,1,2,'x')) - scores += T.sum(candidates * reference, axis=(0)) - return [scores.argmax(axis=(2)), scores.max(axis=(2))] - #------------------------------------------------------------------------------------------------------------------ # Optimization Loop @@ -497,7 +506,7 @@ def evaluate_patches(self, l, f, v): indices[:,:,10:15,2].clip(1, buffers.shape[3]-2, out=indices[:,:,10:15,2]) t = time.time() - best_idx, best_val = self.compute_matches[l](f, buffers, indices) + best_idx, best_val = compute_matches(f, buffers, indices) print('patch_match', time.time() - t) # Numpy array indexing rules seem to require injecting the identity matrix back into the array. @@ -506,8 +515,8 @@ def evaluate_patches(self, l, f, v): ref_idx = indices[accessors[:,:,0],accessors[:,:,1],accessors[:,:,2]] rv = np.percentile(best_val, 5.0) - print('values', ref_idx.shape, (rv - ref_val) / rv, rv) - if (rv - ref_val) / rv < 0.05: + print('values', ref_idx.shape, (rv - ref_val) / rv, rv - ref_val, rv) + if abs(rv - ref_val) < 0.00005: break ref_val = rv From 3f81d50db6c8d7bb77eb7e470e2859c671df21cb Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Tue, 14 Jun 2016 10:10:11 +0200 Subject: [PATCH 50/58] Improved patch-matching using numba and JIT-compiled gu-functions. 
--- doodle.py | 172 +++++++++++++++++++++++++++--------------------------- 1 file changed, 86 insertions(+), 86 deletions(-) diff --git a/doodle.py b/doodle.py index 18d6d0a..4dcd7d0 100755 --- a/doodle.py +++ b/doodle.py @@ -24,6 +24,7 @@ import math import time import pickle +import random import argparse import itertools import collections @@ -242,21 +243,56 @@ def finalize_image(self, image, resolution): return scipy.misc.imresize(image, resolution, interp='bicubic') +#---------------------------------------------------------------------------------------------------------------------- +# Fast Patch Matching +#---------------------------------------------------------------------------------------------------------------------- -@numba.guvectorize([(numba.float32[:,:,:,:], numba.float32[:,:,:,:], numba.int32[:,:,:,:], numba.int32[:,:], numba.float32[:,:])], - '(n,c,x,y),(n,c,z,w),(a,b,m,i)->(a,b),(a,b)', nopython=True, target='parallel') -def compute_matches(inputs, buffers, indices, selected, best): - for a in range(indices.shape[0]): - for b in range(indices.shape[1]): - scores = np.zeros((indices.shape[2],), dtype=np.float32) - for m in range(indices.shape[2]): - i0, i1, i2 = indices[a,b,m,0], indices[a,b,m,1], indices[a,b,m,2] +@numba.guvectorize([(numba.float32[:,:,:,:], numba.float32[:,:,:,:], numba.int32[:,:,:], numba.float32[:,:])], + '(n,c,x,y),(n,c,z,w),(a,b,i),(a,b)', nopython=True, target='parallel') +def patches_initialize(current, buffers, indices, scores): + for b in range(indices.shape[0]): + for a in range(indices.shape[1]): + i0, i1, i2 = indices[b,a] + score = 0.0 + for y, x in [(-1,-1),(-1,0),(-1,+1),(0,-1),(0,0),(0,+1),(+1,-1),(+1,0),(+1,+1)]: + score += np.sum(buffers[i0,:,i1+y,i2+x] * current[0,:,1+b+y,1+a+x]) + scores[b,a] = score + +@numba.guvectorize([(numba.float32[:,:,:,:], numba.float32[:,:,:,:], numba.int32[:,:,:], numba.float32[:,:], numba.float32[:])], + '(n,c,x,y),(n,c,z,w),(a,b,i),(a,b),()', nopython=True) +def 
patches_propagate(current, buffers, indices, scores, i): + even = bool((i[0]%2)==0) + for b in range(0, indices.shape[0]) if even else range(indices.shape[0]-1, -1, -1): + for a in range(0, indices.shape[1]) if even else range(indices.shape[1]-1, -1, -1): + for offset in [(0, 0, -1 if even else +1), (0, -1 if even else +1, 0)]: + i0, i1, i2 = indices[min(indices.shape[0]-1, max(b+offset[1], 0)), min(indices.shape[1]-1, max(a+offset[2], 0))]\ + - np.array(offset, dtype=np.int32) + i1 = min(buffers.shape[2]-2, max(i1, 1)) + i2 = min(buffers.shape[3]-2, max(i2, 1)) + score = 0.0 + for y, x in [(-1,-1),(-1,0),(-1,+1),(0,-1),(0,0),(0,+1),(+1,-1),(+1,0),(+1,+1)]: + score += np.sum(buffers[i0,:,i1+y,i2+x] * current[0,:,1+b+y,1+a+x]) + if score > scores[b,a]: + scores[b,a] = score + indices[b,a] = np.array((i0, i1, i2), dtype=np.int32) + +@numba.guvectorize([(numba.float32[:,:,:,:], numba.float32[:,:,:,:], numba.int32[:,:,:], numba.float32[:,:])], + '(n,c,x,y),(n,c,z,w),(a,b,i),(a,b)', nopython=True, target='parallel') +def patches_search(current, buffers, indices, scores): + for b in range(indices.shape[0]): + for a in range(indices.shape[1]): + i0, i1, i2 = indices[b,a] + for w in range(8): + # i1 = min(buffers.shape[2]-2, max(i1 + random.randint(-w, +w), 1)) + # i2 = min(buffers.shape[3]-2, max(i2 + random.randint(-w, +w), 1)) + i1 = random.randint(1,buffers.shape[2]-2) + i2 = random.randint(1,buffers.shape[3]-2) + score = 0.0 for y, x in [(-1,-1),(-1,0),(-1,+1),(0,-1),(0,0),(0,+1),(+1,-1),(+1,0),(+1,+1)]: - candidates = buffers[i0,:,i1+y,i2+x] - reference = inputs[0,:,1+a+y,1+b+x] - scores[m] += np.sum(candidates * reference) - selected[a,b] = scores.argmax() - best[a,b] = scores.max() + score += np.sum(buffers[i0,:,i1+y,i2+x] * current[0,:,1+b+y,1+a+x]) + if score > scores[b,a]: + scores[b,a] = score + indices[b,a] = np.array((i0, i1, i2), dtype=np.int32) 
#---------------------------------------------------------------------------------------------------------------------- @@ -357,17 +393,16 @@ def prepare_style(self, scale=1.0): output = lasagne.layers.get_output(self.model.network['sem%i'%layer], {self.model.network['lat'+input]: tensor_latent, self.model.network['map']: self.model.tensor_map}) - fn = self.compile([tensor_latent, self.model.tensor_map], [output] + self.do_extract_patches(layer, output)) + fn = self.compile([tensor_latent, self.model.tensor_map], [output] + self.compute_norms(T, layer, output)) self.encoders.append(fn) - # Store all the style patches layer by layer, resized to match slice size and cast to 16-bit for size. + # Store all the style patches layer by layer, resized to match slice size and cast to 16-bit for size. self.style_data, feature = {}, self.style_img for layer, encoder in reversed(list(zip(args.layers, self.encoders))): feature, *data = encoder(feature, self.style_map) - feature, patches = feature[:,:self.model.channels[layer]], data[0] - self.style_data[layer] = [d.astype(np.float16) for d in data]\ - + [np.zeros((patches.shape[0],), dtype=np.float16), -1] - print(' - Layer {} as {} patches {} in {:,}kb.'.format(layer, patches.shape[:2], patches.shape[2:], patches.size//1000)) + self.style_data[layer] = [d.astype(np.float16) for d in [feature]+data]\ + + [np.zeros((feature.shape[0],), dtype=np.float16), -1] + print(' - Layer {} as {} patches {} in {:,}kb.'.format(layer, feature.shape[:2], feature.shape[2:], feature.size//1000)) def prepare_content(self, scale=1.0): """Called each phase of the optimization, rescale the original content image and its map to use as inputs. 
@@ -415,17 +450,16 @@ def prepare_content(self, scale=1.0): feature = feature[:,:self.model.channels[layer]] style = self.style_data[layer][0] - sn, sx = style.min(axis=(0,2,3), keepdims=True), style.max(axis=(0,2,3), keepdims=True) - cn, cx = feature.min(axis=(0,2,3), keepdims=True), feature.max(axis=(0,2,3), keepdims=True) - - self.content_features.insert(0, sn + (feature - cn) * (sx - sn) / (cx - cn)) + # sn, sx = style.min(axis=(0,2,3), keepdims=True), style.max(axis=(0,2,3), keepdims=True) + # cn, cx = feature.min(axis=(0,2,3), keepdims=True), feature.max(axis=(0,2,3), keepdims=True) + # self.content_features.insert(0, sn + (feature - cn) * (sx - sn) / (cx - cn)) + self.content_features.insert(0, feature) print(" - Layer {} as {} array in {:,}kb.".format(layer, feature.shape[1:], feature.size//1000)) def prepare_generation(self): """Layerwise synthesis images requires two sets of Theano functions to be compiled. """ # Patch matching calculation that uses only pre-calculated features and a slice of the patches. - # self.compute_matches = {l: self.compile([self.model.pm_inputs[l], self.model.pm_buffers[l], self.model.pm_candidates[l]], self.do_match_patches(l)) for l in args.layers} self.pm_previous = {} LayerInput = collections.namedtuple('LayerInput', ['array', 'weight']) self.layer_inputs = [[LayerInput(np.copy(self.content_features[i]), w) for _, w in zip(args.layers, extend(args.layer_weight))] @@ -449,17 +483,6 @@ def prepare_generation(self): self.decoders.append(fn) - #------------------------------------------------------------------------------------------------------------------ - # Theano Computation - #------------------------------------------------------------------------------------------------------------------ - - def do_extract_patches(self, layer, output): - """This function builds a Theano expression that will get compiled an run on the GPU. It extracts 3x3 patches - from the intermediate outputs in the model. 
- """ - return [output] + self.compute_norms(T, layer, output) - - #------------------------------------------------------------------------------------------------------------------ # Optimization Loop #------------------------------------------------------------------------------------------------------------------ @@ -469,59 +492,36 @@ def evaluate_patches(self, l, f, v): self.normalize_components(l, buffers, self.style_data[l][1:3]) self.normalize_components(l, f, self.compute_norms(np, l, f)) - SAMPLES = 24 - indices = np.zeros((f.shape[2]-2, f.shape[3]-2, SAMPLES, 3), dtype=np.int32) # TODO: patchsize - - ref_idx, ref_val = self.pm_previous.get(l, (None, 0.0)) - for i in itertools.count(): - indices[:,:,:,1] = np.random.randint(low=1, high=buffers.shape[2]-1, size=indices.shape[:3]) # TODO: patchsize - indices[:,:,:,2] = np.random.randint(low=1, high=buffers.shape[3]-1, size=indices.shape[:3]) # TODO: patchsize - - if ref_idx is not None: - indices[:,:,0,:] = ref_idx + (0,0,0) - indices[:,:,1,:] = ref_idx + (0,0,+1) - indices[:,:,2,:] = ref_idx + (0,+1,0) - indices[:,:,3,:] = ref_idx + (0,0,-1) - indices[:,:,4,:] = ref_idx + (0,-1,0) - - indices[:-1,:,5,:] = ref_idx[+1:,:] + (0,-1,0) - indices[+1:,:,6,:] = ref_idx[:-1,:] + (0,+1,0) - indices[:,:-1,7,:] = ref_idx[:,+1:] + (0,0,-1) - indices[:,+1:,8,:] = ref_idx[:,:-1] + (0,0,+1) - - indices[:,:,:9,1].clip(1, buffers.shape[2]-2, out=indices[:,:,:9,1]) - indices[:,:,:9,2].clip(1, buffers.shape[3]-2, out=indices[:,:,:9,2]) - - previous = self.pm_previous.get(l+1, None) - if i == 0 and previous is not None: - prev_idx, ref_val = previous - resh_idx = np.concatenate([scipy.ndimage.zoom(np.pad(prev_idx[:,:,i]*2, 1, mode='reflect'), 2, order=1)[:,:,np.newaxis] for i in range(1, 3)], axis=(2)) - indices[:,:,10,1:] = resh_idx[+1:-1,+1:-1] - indices[:,:,11,1:] = resh_idx[+2: ,+2: ] - indices[:,:,12,1:] = resh_idx[+2: , :-2] - indices[:,:,13,1:] = resh_idx[ :-2,+2: ] - indices[:,:,14,1:] = resh_idx[ :-2, :-2] - - 
indices[:,:,10:15,1].clip(1, buffers.shape[2]-2, out=indices[:,:,10:15,1]) - indices[:,:,10:15,2].clip(1, buffers.shape[3]-2, out=indices[:,:,10:15,2]) - - t = time.time() - best_idx, best_val = compute_matches(f, buffers, indices) - print('patch_match', time.time() - t) + scores = np.zeros((f.shape[2]-2, f.shape[3]-2), dtype=np.float32) # TODO: patchsize + indices = np.zeros((f.shape[2]-2, f.shape[3]-2, 3), dtype=np.int32) # TODO: patchsize + indices[:,:,1] = np.random.randint(low=1, high=buffers.shape[2]-1, size=indices.shape[:2]) # TODO: patchsize + indices[:,:,2] = np.random.randint(low=1, high=buffers.shape[3]-1, size=indices.shape[:2]) # TODO: patchsize + + # Identity initialization. + # indices[:,:,1:] = np.indices(f.shape[2:]).transpose((1,2,0))[1:-1,1:-1] + # Propagation test. + # indices[:,:,1:] = 1 - # Numpy array indexing rules seem to require injecting the identity matrix back into the array. - identity = np.indices(f.shape[2:]).transpose((1,2,0))[:-2,:-2] + 1 # TODO: patchsize - accessors = np.concatenate([identity - 1, best_idx[:,:,np.newaxis]], axis=2) - ref_idx = indices[accessors[:,:,0],accessors[:,:,1],accessors[:,:,2]] + t = time.time() + patches_initialize(f, buffers, indices, scores) + print('patches_initialize', time.time() - t) - rv = np.percentile(best_val, 5.0) - print('values', ref_idx.shape, (rv - ref_val) / rv, rv - ref_val, rv) - if abs(rv - ref_val) < 0.00005: - break - ref_val = rv + for i in range(24): + t = time.time() + print(np.percentile(scores, [5.0, 25.0, 75.0, 95.0])) + patches_propagate(f, buffers, indices, scores, i) + patches_search(f, buffers, indices, scores) + print('patches_propagate', i, time.time() - t) + + """" + previous = self.pm_previous.get(l+1, None) + prev_idx, ref_val = previous + resh_idx = np.concatenate([scipy.ndimage.zoom(np.pad(prev_idx[:,:,i]*2, 1, mode='reflect'), 2, order=1)[:,:,np.newaxis] for i in range(1, 3)], axis=(2)) + indices[:,:,10,1:] = resh_idx[+1:-1,+1:-1] + """ - self.pm_previous[l] = 
(ref_idx, ref_val) - return ref_idx, best_val + self.pm_previous[l] = (indices, scores) + return indices, scores def evaluate_feature(self, layer, feature, variety=0.0): """Compute best matching patches for this layer, then merge patches into a single feature array of same size. @@ -604,7 +604,7 @@ def render(self, features, layer, suffix): def run(self): """The main entry point for the application, runs through multiple phases at increasing resolutions. """ - self.model.setup(layers=['enc%i_1'%l for l in args.layers] + ['sem%i'%l for l in args.layers] + ['dec%i_1'%l for l in args.layers]) + self.model.setup(layers=['enc%i_1'%l for l in args.layers] + ['sem%i'%l for l in args.layers] + ['dec%i_1'%l for l in args.layers[1:]]) self.prepare_style() self.prepare_content() self.prepare_generation() From ebe133ef30b75f4e72b257e8ad6b57c957153b9c Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Tue, 14 Jun 2016 15:38:42 +0200 Subject: [PATCH 51/58] Using previous layer's matched patches, restored display statistics. 
--- doodle.py | 58 +++++++++++++++++++++---------------------------------- 1 file changed, 22 insertions(+), 36 deletions(-) diff --git a/doodle.py b/doodle.py index 4dcd7d0..f76715f 100755 --- a/doodle.py +++ b/doodle.py @@ -24,7 +24,6 @@ import math import time import pickle -import random import argparse import itertools import collections @@ -276,17 +275,17 @@ def patches_propagate(current, buffers, indices, scores, i): scores[b,a] = score indices[b,a] = np.array((i0, i1, i2), dtype=np.int32) -@numba.guvectorize([(numba.float32[:,:,:,:], numba.float32[:,:,:,:], numba.int32[:,:,:], numba.float32[:,:])], - '(n,c,x,y),(n,c,z,w),(a,b,i),(a,b)', nopython=True, target='parallel') -def patches_search(current, buffers, indices, scores): +@numba.guvectorize([(numba.float32[:,:,:,:], numba.float32[:,:,:,:], numba.int32[:,:,:], numba.float32[:,:], numba.int32[:])], + '(n,c,x,y),(n,c,z,w),(a,b,i),(a,b),()', nopython=True, target='parallel') +def patches_search(current, buffers, indices, scores, k): for b in range(indices.shape[0]): for a in range(indices.shape[1]): i0, i1, i2 = indices[b,a] - for w in range(8): + for w in range(k[0]): # i1 = min(buffers.shape[2]-2, max(i1 + random.randint(-w, +w), 1)) # i2 = min(buffers.shape[3]-2, max(i2 + random.randint(-w, +w), 1)) - i1 = random.randint(1,buffers.shape[2]-2) - i2 = random.randint(1,buffers.shape[3]-2) + i1 = np.random.randint(1, buffers.shape[2]-1) + i2 = np.random.randint(1, buffers.shape[3]-1) score = 0.0 for y, x in [(-1,-1),(-1,0),(-1,+1),(0,-1),(0,0),(0,+1),(+1,-1),(+1,0),(+1,+1)]: score += np.sum(buffers[i0,:,i1+y,i2+x] * current[0,:,1+b+y,1+a+x]) @@ -494,31 +493,19 @@ def evaluate_patches(self, l, f, v): scores = np.zeros((f.shape[2]-2, f.shape[3]-2), dtype=np.float32) # TODO: patchsize indices = np.zeros((f.shape[2]-2, f.shape[3]-2, 3), dtype=np.int32) # TODO: patchsize - indices[:,:,1] = np.random.randint(low=1, high=buffers.shape[2]-1, size=indices.shape[:2]) # TODO: patchsize - indices[:,:,2] = 
np.random.randint(low=1, high=buffers.shape[3]-1, size=indices.shape[:2]) # TODO: patchsize - # Identity initialization. - # indices[:,:,1:] = np.indices(f.shape[2:]).transpose((1,2,0))[1:-1,1:-1] - # Propagation test. - # indices[:,:,1:] = 1 + previous = self.pm_previous.get(l+1, None) + if previous is not None: + def rescale(a): return scipy.ndimage.zoom(np.pad(a, 1, mode='reflect'), 2, order=1)[:,:,np.newaxis] + indices[:,:,1:] = np.concatenate([rescale(previous[0][:,:,i]*2) for i in [1,2]], axis=(2))[+1:-1,+1:-1] + else: + indices[:,:,1] = np.random.randint(low=1, high=buffers.shape[2]-1, size=indices.shape[:2]) # TODO: patchsize + indices[:,:,2] = np.random.randint(low=1, high=buffers.shape[3]-1, size=indices.shape[:2]) # TODO: patchsize - t = time.time() patches_initialize(f, buffers, indices, scores) - print('patches_initialize', time.time() - t) - - for i in range(24): - t = time.time() - print(np.percentile(scores, [5.0, 25.0, 75.0, 95.0])) + for i in range(5): patches_propagate(f, buffers, indices, scores, i) - patches_search(f, buffers, indices, scores) - print('patches_propagate', i, time.time() - t) - - """" - previous = self.pm_previous.get(l+1, None) - prev_idx, ref_val = previous - resh_idx = np.concatenate([scipy.ndimage.zoom(np.pad(prev_idx[:,:,i]*2, 1, mode='reflect'), 2, order=1)[:,:,np.newaxis] for i in range(1, 3)], axis=(2)) - indices[:,:,10,1:] = resh_idx[+1:-1,+1:-1] - """ + patches_search(f, buffers, indices, scores, 5) self.pm_previous[l] = (indices, scores) return indices, scores @@ -529,7 +516,6 @@ def evaluate_feature(self, layer, feature, variety=0.0): iter_time = time.time() B, indices = self.style_data[layer][0][:,:,:,:,np.newaxis,np.newaxis].astype(np.float32), self.style_data[layer][-1] best_idx, best_val = self.evaluate_patches(layer, feature, variety) - # better_patches = buffers[best_idx[:,:,0],:,best_idx[:,:,1],best_idx[:,:,2]] i0, i1, i2 = best_idx[:,:,0], best_idx[:,:,1], best_idx[:,:,2] better_patches = 
np.concatenate([np.concatenate([B[i0,:,i1-1,i2-1], B[i0,:,i1-1,i2+0], B[i0,:,i1-1,i2+1]], axis=4), @@ -540,17 +526,17 @@ def evaluate_feature(self, layer, feature, variety=0.0): better_shape = feature.shape[2:] + (feature.shape[1],) better_feature = reconstruct_from_patches_2d(better_patches, better_shape) - # used = 99. * len(set(best_idx)) / best_idx.shape[0] - # duplicates = 99. * len([v for v in np.bincount(best_idx) if v>1]) / best_idx.shape[0] - # changed = 99. * (1.0 - np.where(indices == best_idx)[0].shape[0] / best_idx.shape[0]) - used, duplicates, changed = -1.0, -2.0, -3.0 + flat_idx = np.sum(best_idx.reshape((-1,3)) * np.array([B.shape[1]*B.shape[2], B.shape[2], 1]), axis=(1)) + used = 99. * len(set(flat_idx)) / flat_idx.shape[0] + duplicates = 99. * len([v for v in np.bincount(flat_idx) if v>1]) / flat_idx.shape[0] + changed = 99. * (1.0 - np.where(indices == flat_idx)[0].shape[0] / flat_idx.shape[0]) err = best_val.mean() print(' {}layer{} {:>1} {}patches{} used {:2.0f}% dups {:2.0f}% chgd {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'\ .format(ansi.BOLD, ansi.ENDC, layer, ansi.BOLD, ansi.ENDC, used, duplicates, changed, ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) - # self.style_data[layer][-1] = best_idx + self.style_data[layer][-1] = flat_idx return better_feature.astype(np.float32).transpose((2, 0, 1))[np.newaxis] def evaluate_features(self): @@ -560,10 +546,10 @@ def evaluate_features(self): content_weight, noise_weight, variety, iterations = p for j in range(iterations): blended = sum([a*w for a, w in self.layer_inputs[i]]) / sum([w for _, w in self.layer_inputs[i]]) - self.render(blended, l, 'blended-L{}I{}'.format(l, j+1)) + # self.render(blended, l, 'blended-L{}I{}'.format(l, j+1)) feature = blended * (1.0 - content_weight) + c * content_weight \ + np.random.normal(0.0, 1.0, size=c.shape).astype(np.float32) * (0.1 * noise_weight) - self.render(feature, l, 'mixed-L{}I{}'.format(l, j+1)) + # self.render(feature, l, 
'mixed-L{}I{}'.format(l, j+1)) result = self.evaluate_feature(l, feature, variety) self.render(result, l, 'output-L{}I{}'.format(l, j+1)) self.layer_inputs[i][i].array[:] = result From 4015ade68bf62078a1a95fc776ece786d038e83a Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Tue, 14 Jun 2016 17:01:33 +0200 Subject: [PATCH 52/58] Normalization of content features and using previous pass for patch matching. --- doodle.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/doodle.py b/doodle.py index f76715f..a7d10db 100755 --- a/doodle.py +++ b/doodle.py @@ -449,10 +449,9 @@ def prepare_content(self, scale=1.0): feature = feature[:,:self.model.channels[layer]] style = self.style_data[layer][0] - # sn, sx = style.min(axis=(0,2,3), keepdims=True), style.max(axis=(0,2,3), keepdims=True) - # cn, cx = feature.min(axis=(0,2,3), keepdims=True), feature.max(axis=(0,2,3), keepdims=True) - # self.content_features.insert(0, sn + (feature - cn) * (sx - sn) / (cx - cn)) - self.content_features.insert(0, feature) + sn, sx = style.min(axis=(0,2,3), keepdims=True), style.max(axis=(0,2,3), keepdims=True) + cn, cx = feature.min(axis=(0,2,3), keepdims=True), feature.max(axis=(0,2,3), keepdims=True) + self.content_features.insert(0, sn + (feature - cn) * (sx - sn) / (cx - cn)) print(" - Layer {} as {} array in {:,}kb.".format(layer, feature.shape[1:], feature.size//1000)) def prepare_generation(self): @@ -494,18 +493,23 @@ def evaluate_patches(self, l, f, v): scores = np.zeros((f.shape[2]-2, f.shape[3]-2), dtype=np.float32) # TODO: patchsize indices = np.zeros((f.shape[2]-2, f.shape[3]-2, 3), dtype=np.int32) # TODO: patchsize - previous = self.pm_previous.get(l+1, None) + previous = self.pm_previous.get(l+1, None) if previous is not None: def rescale(a): return scipy.ndimage.zoom(np.pad(a, 1, mode='reflect'), 2, order=1)[:,:,np.newaxis] indices[:,:,1:] = np.concatenate([rescale(previous[0][:,:,i]*2) for i in [1,2]], 
axis=(2))[+1:-1,+1:-1] else: indices[:,:,1] = np.random.randint(low=1, high=buffers.shape[2]-1, size=indices.shape[:2]) # TODO: patchsize indices[:,:,2] = np.random.randint(low=1, high=buffers.shape[3]-1, size=indices.shape[:2]) # TODO: patchsize - patches_initialize(f, buffers, indices, scores) - for i in range(5): + + if l in self.pm_previous: + i, s = self.pm_previous[l] + w = np.where(s > scores) + indices[w], scores[w] = i[w], s[w] + + for i in range((l-1)**2): patches_propagate(f, buffers, indices, scores, i) - patches_search(f, buffers, indices, scores, 5) + patches_search(f, buffers, indices, scores, 10) self.pm_previous[l] = (indices, scores) return indices, scores @@ -528,7 +532,7 @@ def evaluate_feature(self, layer, feature, variety=0.0): flat_idx = np.sum(best_idx.reshape((-1,3)) * np.array([B.shape[1]*B.shape[2], B.shape[2], 1]), axis=(1)) used = 99. * len(set(flat_idx)) / flat_idx.shape[0] - duplicates = 99. * len([v for v in np.bincount(flat_idx) if v>1]) / flat_idx.shape[0] + duplicates = 99. * len([v for v in np.bincount(flat_idx) if v>1]) / len(set(flat_idx)) changed = 99. 
* (1.0 - np.where(indices == flat_idx)[0].shape[0] / flat_idx.shape[0]) err = best_val.mean() @@ -546,10 +550,10 @@ def evaluate_features(self): content_weight, noise_weight, variety, iterations = p for j in range(iterations): blended = sum([a*w for a, w in self.layer_inputs[i]]) / sum([w for _, w in self.layer_inputs[i]]) - # self.render(blended, l, 'blended-L{}I{}'.format(l, j+1)) + self.render(blended, l, 'blended-L{}I{}'.format(l, j+1)) feature = blended * (1.0 - content_weight) + c * content_weight \ + np.random.normal(0.0, 1.0, size=c.shape).astype(np.float32) * (0.1 * noise_weight) - # self.render(feature, l, 'mixed-L{}I{}'.format(l, j+1)) + self.render(feature, l, 'mixed-L{}I{}'.format(l, j+1)) result = self.evaluate_feature(l, feature, variety) self.render(result, l, 'output-L{}I{}'.format(l, j+1)) self.layer_inputs[i][i].array[:] = result From b413689a561194c4c767b5129c6bd929d651aeb5 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Tue, 14 Jun 2016 19:30:30 +0200 Subject: [PATCH 53/58] Extracted patch score calculation, removed unused code. --- doodle.py | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/doodle.py b/doodle.py index a7d10db..bd19d2d 100755 --- a/doodle.py +++ b/doodle.py @@ -187,13 +187,9 @@ def ConcatenateLayer(incoming, layer): return ConcatLayer([incoming, net['map%i'%layer]]) if args.semantic_weight > 0.0 else incoming # Auxiliary network for the semantic layers, and the nearest neighbors calculations. 
- self.pm_inputs, self.pm_buffers, self.pm_candidates = {}, {}, {} for layer, upper, lower in zip(args.layers, [None] + args.layers[:-1], args.layers[1:] + [None]): self.channels[layer] = net['enc%i_1'%layer].num_filters net['sem%i'%layer] = ConcatenateLayer(net['enc%i_1'%layer], layer) - self.pm_inputs[layer] = T.ftensor4() - self.pm_buffers[layer] = T.ftensor4() - self.pm_candidates[layer] = T.itensor4() self.network = net def load_data(self): @@ -246,16 +242,20 @@ def finalize_image(self, image, resolution): # Fast Patch Matching #---------------------------------------------------------------------------------------------------------------------- +@numba.jit() +def patches_score(current, buffers, i0, i1, i2, b, a): + score = 0.0 + for y, x in [(-1,-1),(-1,0),(-1,+1),(0,-1),(0,0),(0,+1),(+1,-1),(+1,0),(+1,+1)]: + score += np.sum(buffers[i0,:,i1+y,i2+x] * current[0,:,1+b+y,1+a+x]) + return score + @numba.guvectorize([(numba.float32[:,:,:,:], numba.float32[:,:,:,:], numba.int32[:,:,:], numba.float32[:,:])], '(n,c,x,y),(n,c,z,w),(a,b,i),(a,b)', nopython=True, target='parallel') def patches_initialize(current, buffers, indices, scores): for b in range(indices.shape[0]): for a in range(indices.shape[1]): i0, i1, i2 = indices[b,a] - score = 0.0 - for y, x in [(-1,-1),(-1,0),(-1,+1),(0,-1),(0,0),(0,+1),(+1,-1),(+1,0),(+1,+1)]: - score += np.sum(buffers[i0,:,i1+y,i2+x] * current[0,:,1+b+y,1+a+x]) - scores[b,a] = score + scores[b,a] = patches_score(current, buffers, i0, i1, i2, b, a) @numba.guvectorize([(numba.float32[:,:,:,:], numba.float32[:,:,:,:], numba.int32[:,:,:], numba.float32[:,:], numba.float32[:])], '(n,c,x,y),(n,c,z,w),(a,b,i),(a,b),()', nopython=True) @@ -268,9 +268,7 @@ def patches_propagate(current, buffers, indices, scores, i): - np.array(offset, dtype=np.int32) i1 = min(buffers.shape[2]-2, max(i1, 1)) i2 = min(buffers.shape[3]-2, max(i2, 1)) - score = 0.0 - for y, x in [(-1,-1),(-1,0),(-1,+1),(0,-1),(0,0),(0,+1),(+1,-1),(+1,0),(+1,+1)]: - score += 
np.sum(buffers[i0,:,i1+y,i2+x] * current[0,:,1+b+y,1+a+x]) + score = patches_score(current, buffers, i0, i1, i2, b, a) if score > scores[b,a]: scores[b,a] = score indices[b,a] = np.array((i0, i1, i2), dtype=np.int32) @@ -286,9 +284,7 @@ def patches_search(current, buffers, indices, scores, k): # i2 = min(buffers.shape[3]-2, max(i2 + random.randint(-w, +w), 1)) i1 = np.random.randint(1, buffers.shape[2]-1) i2 = np.random.randint(1, buffers.shape[3]-1) - score = 0.0 - for y, x in [(-1,-1),(-1,0),(-1,+1),(0,-1),(0,0),(0,+1),(+1,-1),(+1,0),(+1,+1)]: - score += np.sum(buffers[i0,:,i1+y,i2+x] * current[0,:,1+b+y,1+a+x]) + score = patches_score(current, buffers, i0, i1, i2, b, a) if score > scores[b,a]: scores[b,a] = score indices[b,a] = np.array((i0, i1, i2), dtype=np.int32) @@ -326,7 +322,7 @@ def __init__(self): def rescale_image(self, img, scale): """Re-implementing skimage.transform.scale without the extra dependency. Saves a lot of space and hassle! """ - output = scipy.misc.toimage(img, cmin=0.0, cmax=255) + output = scipy.misc.toimage(img, cmin=0.0, cmax=255.0) return np.asarray(PIL.ImageOps.fit(output, [snap(dim*scale) for dim in output.size], PIL.Image.ANTIALIAS)) def load_images(self, name, filename, scale=1.0): @@ -531,12 +527,12 @@ def evaluate_feature(self, layer, feature, variety=0.0): better_feature = reconstruct_from_patches_2d(better_patches, better_shape) flat_idx = np.sum(best_idx.reshape((-1,3)) * np.array([B.shape[1]*B.shape[2], B.shape[2], 1]), axis=(1)) - used = 99. * len(set(flat_idx)) / flat_idx.shape[0] - duplicates = 99. * len([v for v in np.bincount(flat_idx) if v>1]) / len(set(flat_idx)) - changed = 99. 
* (1.0 - np.where(indices == flat_idx)[0].shape[0] / flat_idx.shape[0]) + used = 100.0 * len(set(flat_idx)) / flat_idx.shape[0] + duplicates = 100.0 * len([v for v in np.bincount(flat_idx) if v>1]) / len(set(flat_idx)) + changed = 100.0 * (1.0 - np.where(indices == flat_idx)[0].shape[0] / flat_idx.shape[0]) err = best_val.mean() - print(' {}layer{} {:>1} {}patches{} used {:2.0f}% dups {:2.0f}% chgd {:2.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'\ + print(' {}layer{} {:>1} {}patches{} used {:<3.0f}% dups {:<3.0f}% chgd {:<3.0f}% {}error{} {:3.2e} {}time{} {:3.1f}s'\ .format(ansi.BOLD, ansi.ENDC, layer, ansi.BOLD, ansi.ENDC, used, duplicates, changed, ansi.BOLD, ansi.ENDC, err, ansi.BOLD, ansi.ENDC, time.time() - iter_time)) From 3eb2fe5dfa26b8c2697c7ce2fb679b7ed287aabc Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Wed, 15 Jun 2016 21:18:13 +0200 Subject: [PATCH 54/58] Simplified model code, preparing for custom patch biases. --- doodle.py | 72 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 37 insertions(+), 35 deletions(-) diff --git a/doodle.py b/doodle.py index bd19d2d..5fe24d4 100755 --- a/doodle.py +++ b/doodle.py @@ -42,7 +42,8 @@ add_arg('--content-weight', default=[.3,.2,.1], nargs='+', type=float, help='Weight of input content features each layer.') add_arg('--noise-weight', default=[.2,.1,.0], nargs='+', type=float, help='Weight of noise added into features.') add_arg('--iterations', default=[4, 4, 1], nargs='+', type=int, help='Number of times to repeat layer optimization.') -add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') +add_arg('--shapes', default=[3], nargs='+', type=int, help='Size of kernels used for patch extraction.') +add_arg('--quality', default=0.002, type=float, help='Threshold of improvement to stop patch matching.') add_arg('--seed', default=None, type=int, help='Initial state for the random number generator.') add_arg('--semantic-ext', 
default='_sem.png', type=str, help='File extension for the semantic maps.') add_arg('--semantic-weight', default=0.0, type=float, help='Global weight of semantics vs. style features.') @@ -132,7 +133,7 @@ def setup_model(self, previous=None): for j in range(6): net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad') - self.tensor_latent = [] + self.tensor_img, self.tensor_map, self.tensor_latent = T.tensor4(), T.tensor4(), [] for l in args.layers: self.tensor_latent.append((str(l), T.tensor4())) net['lat%i'%l] = InputLayer((None, self.units[l], None, None), var=self.tensor_latent[-1][1]) @@ -208,20 +209,6 @@ def load_data(self): .format(layer, p.get_value().shape, v.shape) p.set_value(v.astype(np.float32)) - def setup(self, layers): - """Setup the inputs and outputs, knowing the layers that are required by the optimization algorithm. - """ - self.tensor_img = T.tensor4() - self.tensor_map = T.tensor4() - tensor_inputs = {self.network['img']: self.tensor_img, self.network['map']: self.tensor_map} - outputs = lasagne.layers.get_output([self.network[l] for l in layers], tensor_inputs) - self.tensor_outputs = {k: v for k, v in zip(layers, outputs)} - - def get_outputs(self, type, layers): - """Fetch the output tensors for the network layers. - """ - return [self.tensor_outputs[type+l] for l in layers] - def prepare_image(self, image): """Given an image loaded from disk, turn it into a representation compatible with the model. The format is (b,c,y,x) with batch=1 for a single image, channels=3 for RGB, and y,x matching the resolution. 
@@ -257,35 +244,35 @@ def patches_initialize(current, buffers, indices, scores): i0, i1, i2 = indices[b,a] scores[b,a] = patches_score(current, buffers, i0, i1, i2, b, a) -@numba.guvectorize([(numba.float32[:,:,:,:], numba.float32[:,:,:,:], numba.int32[:,:,:], numba.float32[:,:], numba.float32[:])], - '(n,c,x,y),(n,c,z,w),(a,b,i),(a,b),()', nopython=True) -def patches_propagate(current, buffers, indices, scores, i): +@numba.guvectorize([(numba.float32[:,:,:,:], numba.float32[:,:,:,:], numba.float32[:,:,:], numba.int32[:,:,:], numba.float32[:,:], numba.float32[:])], + '(n,c,x,y),(n,c,z,w),(n,z,w),(a,b,i),(a,b),()', nopython=True) +def patches_propagate(current, buffers, biases, indices, scores, i): even = bool((i[0]%2)==0) for b in range(0, indices.shape[0]) if even else range(indices.shape[0]-1, -1, -1): for a in range(0, indices.shape[1]) if even else range(indices.shape[1]-1, -1, -1): for offset in [(0, 0, -1 if even else +1), (0, -1 if even else +1, 0)]: i0, i1, i2 = indices[min(indices.shape[0]-1, max(b+offset[1], 0)), min(indices.shape[1]-1, max(a+offset[2], 0))]\ - np.array(offset, dtype=np.int32) - i1 = min(buffers.shape[2]-2, max(i1, 1)) - i2 = min(buffers.shape[3]-2, max(i2, 1)) + i1, i2 = min(buffers.shape[2]-2, max(i1, 1)), min(buffers.shape[3]-2, max(i2, 1)) + j0, j1, j2 = indices[b,a] score = patches_score(current, buffers, i0, i1, i2, b, a) - if score > scores[b,a]: + if score + biases[i0,i1,i2] > scores[b,a] + biases[j0,j1,j2]: scores[b,a] = score indices[b,a] = np.array((i0, i1, i2), dtype=np.int32) -@numba.guvectorize([(numba.float32[:,:,:,:], numba.float32[:,:,:,:], numba.int32[:,:,:], numba.float32[:,:], numba.int32[:])], - '(n,c,x,y),(n,c,z,w),(a,b,i),(a,b),()', nopython=True, target='parallel') -def patches_search(current, buffers, indices, scores, k): +@numba.guvectorize([(numba.float32[:,:,:,:], numba.float32[:,:,:,:], numba.float32[:,:,:], numba.int32[:,:,:], numba.float32[:,:], numba.int32[:])], + 
'(n,c,x,y),(n,c,z,w),(n,z,w),(a,b,i),(a,b),()', nopython=True, target='parallel') +def patches_search(current, buffers, biases, indices, scores, k): for b in range(indices.shape[0]): for a in range(indices.shape[1]): i0, i1, i2 = indices[b,a] for w in range(k[0]): # i1 = min(buffers.shape[2]-2, max(i1 + random.randint(-w, +w), 1)) # i2 = min(buffers.shape[3]-2, max(i2 + random.randint(-w, +w), 1)) - i1 = np.random.randint(1, buffers.shape[2]-1) - i2 = np.random.randint(1, buffers.shape[3]-1) + i1, i2 = np.random.randint(1, buffers.shape[2]-1), np.random.randint(1, buffers.shape[3]-1) + j0, j1, j2 = indices[b,a] score = patches_score(current, buffers, i0, i1, i2, b, a) - if score > scores[b,a]: + if score + biases[i0,i1,i2] > scores[b,a] + biases[j0,j1,j2]: scores[b,a] = score indices[b,a] = np.array((i0, i1, i2), dtype=np.int32) @@ -459,6 +446,7 @@ def prepare_generation(self): self.layer_inputs = [[LayerInput(np.copy(self.content_features[i]), w) for _, w in zip(args.layers, extend(args.layer_weight))] for i, _ in enumerate(args.layers)] + def prepare_network(self): # Decoding intermediate features into more specialized features and all the way to the output image. 
self.encoders, input_tensors = [], self.model.tensor_latent[1:] + [('0', self.model.tensor_img)] for name, (input, tensor_latent) in zip(args.layers, input_tensors): @@ -486,13 +474,24 @@ def evaluate_patches(self, l, f, v): self.normalize_components(l, buffers, self.style_data[l][1:3]) self.normalize_components(l, f, self.compute_norms(np, l, f)) + biases = np.zeros((buffers.shape[0],)+buffers.shape[2:], dtype=np.float32) scores = np.zeros((f.shape[2]-2, f.shape[3]-2), dtype=np.float32) # TODO: patchsize indices = np.zeros((f.shape[2]-2, f.shape[3]-2, 3), dtype=np.int32) # TODO: patchsize - previous = self.pm_previous.get(l+1, None) + """ + cur_gram = f.reshape((f.shape[1], -1)) + cur_gram = np.tensordot(cur_gram, cur_gram.T, axes=(1,0)) + + sty_gram = buffers.reshape((buffers.shape[1], -1)) + sty_gram = np.tensordot(sty_gram, sty_gram.T, axes=(1,0)) + + adjust = sty_gram - cur_gram + """ + + previous = self.pm_previous.get(l+1, None) if previous is not None: - def rescale(a): return scipy.ndimage.zoom(np.pad(a, 1, mode='reflect'), 2, order=1)[:,:,np.newaxis] - indices[:,:,1:] = np.concatenate([rescale(previous[0][:,:,i]*2) for i in [1,2]], axis=(2))[+1:-1,+1:-1] + def rescale(a): return scipy.ndimage.zoom(np.pad(a, 1, mode='reflect'), 2, order=1)[:,:,np.newaxis] # TODO: patchsize + indices[:,:,1:] = np.concatenate([rescale(previous[0][:,:,i]*2) for i in [1,2]], axis=(2))[+1:-1,+1:-1] # TODO: patchsize else: indices[:,:,1] = np.random.randint(low=1, high=buffers.shape[2]-1, size=indices.shape[:2]) # TODO: patchsize indices[:,:,2] = np.random.randint(low=1, high=buffers.shape[3]-1, size=indices.shape[:2]) # TODO: patchsize @@ -503,9 +502,12 @@ def rescale(a): return scipy.ndimage.zoom(np.pad(a, 1, mode='reflect'), 2, order w = np.where(s > scores) indices[w], scores[w] = i[w], s[w] - for i in range((l-1)**2): - patches_propagate(f, buffers, indices, scores, i) - patches_search(f, buffers, indices, scores, 10) + m = scores.mean() + for i in itertools.count(): + 
patches_propagate(f, buffers, biases, indices, scores, i) + patches_search(f, buffers, biases, indices, scores, 10) + m, s = scores.mean(), m + if m - s < args.quality: break self.pm_previous[l] = (indices, scores) return indices, scores @@ -590,10 +592,10 @@ def render(self, features, layer, suffix): def run(self): """The main entry point for the application, runs through multiple phases at increasing resolutions. """ - self.model.setup(layers=['enc%i_1'%l for l in args.layers] + ['sem%i'%l for l in args.layers] + ['dec%i_1'%l for l in args.layers[1:]]) self.prepare_style() self.prepare_content() self.prepare_generation() + self.prepare_network() Xn = self.evaluate((self.content_img[0] + 1.0) * 127.5) output = self.model.finalize_image(Xn.reshape(self.content_img.shape[1:]), self.content_shape) From ac57d04784290db0b727b874c4dea2dc32e31775 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Thu, 16 Jun 2016 23:53:36 +0200 Subject: [PATCH 55/58] Experimental visualizations of the network. 
--- tools/visualize.py | 77 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 tools/visualize.py diff --git a/tools/visualize.py b/tools/visualize.py new file mode 100644 index 0000000..08ba26e --- /dev/null +++ b/tools/visualize.py @@ -0,0 +1,77 @@ +import matplotlib.pyplot as plt +import numpy as np + +import doodle + +generator = doodle.NeuralGenerator() +generator.prepare_network() + +def calculate_patch_coordinates(l, j, i): + ys, xs, ye, xe = j, i, j, i + while hasattr(l, 'filter_size'): + after = l.filter_size[0]//2 + before = l.filter_size[0] - 1 - after + ys -= before + xs -= before + ye += after + xe += after + ys *= l.stride[0] + xs *= l.stride[0] + ye *= l.stride[0] + xe *= l.stride[0] + l = l.input_layer + return ys, xs, ye, xe + +import glob +import collections + + +candidates = collections.defaultdict(list) +for content in glob.glob(doodle.args.content): + image, mask = generator.load_images('content', content, scale=1.0) + + feature = generator.model.prepare_image(image) + for layer, encoder in reversed(list(zip(doodle.args.layers, generator.encoders))): + feature = encoder(feature, mask) + + x = feature.reshape(feature.shape[:2]+(-1,))[:,:-3,:] + # x = (x - x.mean(axis=(0,2), keepdims=True)) # / x.std(axis=(0,2), keepdims=True) + covariance = np.tensordot(x, x, axes=([2], [2])).mean(axis=(0,2)) / x.shape[2] + np.fill_diagonal(covariance, 0.0) + # print(covariance.shape, covariance.min(), covariance.max()) + + # subplot.imshow(covariance, interpolation='nearest') + + for i in range(feature.shape[1]): + w = feature[:,i:i+1,:,:] + for idx in np.argsort(w.flatten())[-15:]: + _, _, y, x = np.unravel_index(idx, w.shape) + # print('coords', y, x, 'value', ) + a, b, c, d = calculate_patch_coordinates(generator.model.network['enc%i_1'%layer], y, x) + img = np.copy(image[max(0,a):min(image.shape[0],c), max(0, b):min(image.shape[1],d)]) + candidates[i].append((img, w.flatten()[idx])) + + # _, _, y, x = 
np.unravel_index(feature[0,0,:,:].argmax(), feature.shape) + # print(y, x, calculate_patch_coordinates('enc%i_1'%layer, y, x)) + + # subplot.set_title('Layer {}'.format(layer)) + + # subplot.violinplot([feature[:,i,:,:].flatten() for i in range(feature.shape[1])], showmeans=False, showmedians=True) + + # x = np.arange(0, feature.shape[1], 1) + # y = [feature.min(axis=(0,2,3)), feature.mean(axis=(0,2,3)), feature.max(axis=(0,2,3))] + # for j in y: + # plt.errorbar(x, j) + +fig, axes = plt.subplots(3, 5, figsize=(10, 6), subplot_kw={'xticks': [], 'yticks': []}) +fig.subplots_adjust(hspace=0.3, wspace=0.05) +# if not hasattr(axes, 'flat'): axes.flat = [plt] + +for i, c in candidates.items(): + c.sort(key=lambda x: x[1]) + for (img, _), subplot in zip(c[-15:], axes.flat): + subplot.imshow(img, interpolation='nearest') + plt.savefig('channel_{}.png'.format(i)) + +# plt.show() +# print(i, c[0][1], c[-1][1]) From 69881552069e215e7bbbd1c94aa1cd1c56b6afd3 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sat, 18 Jun 2016 17:29:49 +0200 Subject: [PATCH 56/58] Patch-variety experimental code using statistics, works in a single pass but benefits from iteration still. --- doodle.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/doodle.py b/doodle.py index 5fe24d4..06fe8a6 100755 --- a/doodle.py +++ b/doodle.py @@ -447,7 +447,6 @@ def prepare_generation(self): for i, _ in enumerate(args.layers)] def prepare_network(self): - # Decoding intermediate features into more specialized features and all the way to the output image. 
self.encoders, input_tensors = [], self.model.tensor_latent[1:] + [('0', self.model.tensor_img)] for name, (input, tensor_latent) in zip(args.layers, input_tensors): layer = lasagne.layers.get_output(self.model.network['enc%i_1'%name], @@ -456,6 +455,7 @@ def prepare_network(self): fn = self.compile([tensor_latent, self.model.tensor_map], layer) self.encoders.append(fn) + # Decoding intermediate features into more specialized features and all the way to the output image. self.decoders, output_layers = [], (['dec%i_1'%l for l in args.layers[1:]] + ['out']) for name, (input, tensor_latent), output in zip(args.layers, self.model.tensor_latent, output_layers): layer = lasagne.layers.get_output(self.model.network[output], @@ -478,15 +478,23 @@ def evaluate_patches(self, l, f, v): scores = np.zeros((f.shape[2]-2, f.shape[3]-2), dtype=np.float32) # TODO: patchsize indices = np.zeros((f.shape[2]-2, f.shape[3]-2, 3), dtype=np.int32) # TODO: patchsize - """ + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + # Patch-variety experiment that boosts the scores of patches that are clearly distinct from + # the current statistical distribution. + # + # TODO: Move the `for` loop into a numba vectorized function that can be run in parallel. 
+ + # sty_gram = buffers.reshape((buffers.shape[1], -1)) + # sty_gram = np.tensordot(sty_gram, sty_gram, axes=(1,1)) + cur_gram = f.reshape((f.shape[1], -1)) - cur_gram = np.tensordot(cur_gram, cur_gram.T, axes=(1,0)) + cur_gram = np.tensordot(cur_gram, cur_gram, axes=(1,1)) / cur_gram.shape[1] - sty_gram = buffers.reshape((buffers.shape[1], -1)) - sty_gram = np.tensordot(sty_gram, sty_gram.T, axes=(1,0)) + for y, x in itertools.product(range(buffers.shape[2]), range(buffers.shape[3])): + pix_gram = buffers[0,:,y,x].reshape((-1,1)) * buffers[0,:,y,x].reshape((1,-1)) + biases[0,y,x] = np.sum((pix_gram - cur_gram) ** 2.0) * 50.0 - adjust = sty_gram - cur_gram - """ + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ previous = self.pm_previous.get(l+1, None) if previous is not None: From f08c4d2cd2aa9ad5cf58a7a8e0b0c1e3d5149b54 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sun, 19 Jun 2016 15:43:02 +0200 Subject: [PATCH 57/58] Removed content feature normalisation, prototype for matching style grams not just diversity, improved patch-matching, saving fewer files in debug mode. 
--- doodle.py | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/doodle.py b/doodle.py index 06fe8a6..8cd5121 100755 --- a/doodle.py +++ b/doodle.py @@ -266,10 +266,10 @@ def patches_search(current, buffers, biases, indices, scores, k): for b in range(indices.shape[0]): for a in range(indices.shape[1]): i0, i1, i2 = indices[b,a] - for w in range(k[0]): - # i1 = min(buffers.shape[2]-2, max(i1 + random.randint(-w, +w), 1)) - # i2 = min(buffers.shape[3]-2, max(i2 + random.randint(-w, +w), 1)) - i1, i2 = np.random.randint(1, buffers.shape[2]-1), np.random.randint(1, buffers.shape[3]-1) + for radius in range(k[0], 0, -1): + w = 2 ** radius + i1 = min(buffers.shape[2]-2, max(i1 + np.random.randint(-w, +w), 1)) + i2 = min(buffers.shape[3]-2, max(i2 + np.random.randint(-w, +w), 1)) j0, j1, j2 = indices[b,a] score = patches_score(current, buffers, i0, i1, i2, b, a) if score + biases[i0,i1,i2] > scores[b,a] + biases[j0,j1,j2]: @@ -431,10 +431,7 @@ def prepare_content(self, scale=1.0): feature, *_ = encoder(feature, self.content_map) feature = feature[:,:self.model.channels[layer]] style = self.style_data[layer][0] - - sn, sx = style.min(axis=(0,2,3), keepdims=True), style.max(axis=(0,2,3), keepdims=True) - cn, cx = feature.min(axis=(0,2,3), keepdims=True), feature.max(axis=(0,2,3), keepdims=True) - self.content_features.insert(0, sn + (feature - cn) * (sx - sn) / (cx - cn)) + self.content_features.insert(0, feature) print(" - Layer {} as {} array in {:,}kb.".format(layer, feature.shape[1:], feature.size//1000)) def prepare_generation(self): @@ -484,15 +481,16 @@ def evaluate_patches(self, l, f, v): # # TODO: Move the `for` loop into a numba vectorized function that can be run in parallel. 
- # sty_gram = buffers.reshape((buffers.shape[1], -1)) - # sty_gram = np.tensordot(sty_gram, sty_gram, axes=(1,1)) + sty_gram = buffers.reshape((buffers.shape[1], -1)) + sty_gram = np.tensordot(sty_gram, sty_gram, axes=(1,1)) / sty_gram.shape[1] cur_gram = f.reshape((f.shape[1], -1)) cur_gram = np.tensordot(cur_gram, cur_gram, axes=(1,1)) / cur_gram.shape[1] for y, x in itertools.product(range(buffers.shape[2]), range(buffers.shape[3])): pix_gram = buffers[0,:,y,x].reshape((-1,1)) * buffers[0,:,y,x].reshape((1,-1)) - biases[0,y,x] = np.sum((pix_gram - cur_gram) ** 2.0) * 50.0 + # biases[0,y,x] = np.sum((pix_gram - cur_gram) ** 2.0) * args.variety + biases[0,y,x] = np.sum((pix_gram - cur_gram) * (sty_gram - cur_gram)) * args.variety[0] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -513,7 +511,7 @@ def rescale(a): return scipy.ndimage.zoom(np.pad(a, 1, mode='reflect'), 2, order m = scores.mean() for i in itertools.count(): patches_propagate(f, buffers, biases, indices, scores, i) - patches_search(f, buffers, biases, indices, scores, 10) + patches_search(f, buffers, biases, indices, scores, 8) m, s = scores.mean(), m if m - s < args.quality: break @@ -556,10 +554,14 @@ def evaluate_features(self): content_weight, noise_weight, variety, iterations = p for j in range(iterations): blended = sum([a*w for a, w in self.layer_inputs[i]]) / sum([w for _, w in self.layer_inputs[i]]) - self.render(blended, l, 'blended-L{}I{}'.format(l, j+1)) + if len(self.layer_inputs[i]) > 1: + self.render(blended, l, 'blended-L{}I{}'.format(l, j+1)) + feature = blended * (1.0 - content_weight) + c * content_weight \ + np.random.normal(0.0, 1.0, size=c.shape).astype(np.float32) * (0.1 * noise_weight) - self.render(feature, l, 'mixed-L{}I{}'.format(l, j+1)) + if content_weight not in (0.0, 1.0): + self.render(feature, l, 'mixed-L{}I{}'.format(l, j+1)) + result = self.evaluate_feature(l, feature, variety) self.render(result, l, 
'output-L{}I{}'.format(l, j+1)) self.layer_inputs[i][i].array[:] = result From fae15a796b0e748915b0cd39b83760c6d7a1fb23 Mon Sep 17 00:00:00 2001 From: "Alex J. Champandard" Date: Sun, 3 Jul 2016 21:13:35 +0200 Subject: [PATCH 58/58] Experiment with residual network. Features are not as well suited to expressing style. --- doodle.py | 118 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 67 insertions(+), 51 deletions(-) diff --git a/doodle.py b/doodle.py index 8cd5121..02f33dd 100755 --- a/doodle.py +++ b/doodle.py @@ -127,19 +127,21 @@ def setup_model(self, previous=None): and then adding augmentations for Semantic Style Transfer. """ net, self.channels = {}, {} - self.units = {1: 48, 2: 80, 3: 128, 4: 208, 5: 328, 6: 536} + self.units = {1: 48, 2: 80, 3: 136, 4: 224} net['map'] = InputLayer((1, None, None, None)) + net['pool'] = InputLayer((1, 3, None, None)) for j in range(6): net['map%i'%(j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad') + net['pool%i'%(j+1)] = PoolLayer(net['pool'], 2**j, mode='average_exc_pad') - self.tensor_img, self.tensor_map, self.tensor_latent = T.tensor4(), T.tensor4(), [] + self.tensor_img, self.tensor_pool, self.tensor_map, self.tensor_latent = T.tensor4(), T.tensor4(), T.tensor4(), [] for l in args.layers: self.tensor_latent.append((str(l), T.tensor4())) - net['lat%i'%l] = InputLayer((None, self.units[l], None, None), var=self.tensor_latent[-1][1]) + net['lat%i'%l] = InputLayer((None, 3+self.units[l], None, None), var=self.tensor_latent[-1][1]) def EncdLayer(previous, channels, filter_size, pad, stride=(1,1), nonlinearity=lasagne.nonlinearities.elu): - incoming = net['lat'+previous[0]] if int(previous[0]) in args.layers and previous[1:] == '_1' else net['enc'+previous] + incoming = net['lat'+previous[0]] if int(previous[0]) in args.layers and previous[1:] == '_0' else net['enc'+previous] return ConvLayer(incoming, channels, filter_size, pad=pad, stride=stride, nonlinearity=nonlinearity) # Encoder part 
of the neural network, takes an input image and turns it into abstract patterns. @@ -147,41 +149,39 @@ def EncdLayer(previous, channels, filter_size, pad, stride=(1,1), nonlinearity=l net['enc0_0'], net['lat0'] = net['img'], net['img'] net['enc1_1'] = EncdLayer('0_0', 48, 3, pad=1) net['enc1_2'] = EncdLayer('1_1', 48, 3, pad=1) - net['enc2_1'] = EncdLayer('1_2', 80, 2, pad=0, stride=(2,2)) + net['enc2_0-conv'] = EncdLayer('1_2', 80, 2, pad=0, stride=(2,2), nonlinearity=lasagne.nonlinearities.elu) + net['enc2_0'] = ConcatLayer([net['pool2'], net['enc2_0-conv']], axis=1) + net['enc2_0'].num_filters = net['enc2_0-conv'].num_filters + 3 + net['enc2_1'] = EncdLayer('2_0', 80, 3, pad=1) net['enc2_2'] = EncdLayer('2_1', 80, 3, pad=1) - net['enc3_1'] = EncdLayer('2_2', 128, 2, pad=0, stride=(2,2)) - net['enc3_2'] = EncdLayer('3_1', 128, 3, pad=1) - net['enc3_3'] = EncdLayer('3_2', 128, 3, pad=1) - net['enc4_1'] = EncdLayer('3_3', 208, 2, pad=0, stride=(2,2)) - net['enc4_2'] = EncdLayer('4_1', 208, 3, pad=1) - net['enc4_3'] = EncdLayer('4_2', 208, 3, pad=1) - net['enc5_1'] = EncdLayer('4_3', 328, 2, pad=0, stride=(2,2)) - net['enc5_2'] = EncdLayer('5_1', 328, 3, pad=1) - net['enc5_3'] = EncdLayer('5_2', 328, 3, pad=1) - net['enc6_1'] = EncdLayer('5_3', 536, 2, pad=0, stride=(2,2)) + net['enc3_0-conv'] = EncdLayer('2_2', 136, 2, pad=0, stride=(2,2), nonlinearity=lasagne.nonlinearities.elu) + net['enc3_0'] = ConcatLayer([net['pool3'], net['enc3_0-conv']], axis=1) + net['enc3_0'].num_filters = net['enc3_0-conv'].num_filters + 3 + net['enc3_1'] = EncdLayer('3_0', 136, 3, pad=1) + net['enc3_2'] = EncdLayer('3_1', 136, 3, pad=1) + net['enc3_3'] = EncdLayer('3_2', 136, 3, pad=1) + net['enc4_0-conv'] = EncdLayer('3_3', 224, 2, pad=0, stride=(2,2), nonlinearity=lasagne.nonlinearities.elu) + net['enc4_0'] = ConcatLayer([net['pool4'], net['enc4_0-conv']], axis=1) + net['enc4_0'].num_filters = net['enc4_0-conv'].num_filters + 3 def DecdLayer(copy, previous, channels, 
nonlinearity=lasagne.nonlinearities.elu): # Dynamically injects intermediate "pitstop" output layers in the decoder based on what the user specified as layers. - dup, incoming = net['enc'+copy], net['lat'+copy[0]] if int(copy[0]) in args.layers and copy[1:] == '_1' else net[previous] + dup, incoming = net['enc'+copy], net['lat'+copy[0]] if int(copy[0]) in args.layers and '_0' in copy[1:] else net[previous] return DeconvLayer(incoming, channels, dup.filter_size, stride=dup.stride, crop=dup.pad, nonlinearity=nonlinearity) # Decoder part of the neural network, takes abstract patterns and converts them into an image! - net['dec5_3'] = DecdLayer('6_1', 'enc6_1', 328) - net['dec5_2'] = DecdLayer('5_3', 'dec5_3', 328) - net['dec5_1'] = DecdLayer('5_2', 'dec5_2', 328) - net['dec4_3'] = DecdLayer('5_1', 'dec5_1', 208) - net['dec4_2'] = DecdLayer('4_3', 'dec4_3', 208) - net['dec4_1'] = DecdLayer('4_2', 'dec4_2', 208) - net['dec3_3'] = DecdLayer('4_1', 'dec4_1', 128) - net['dec3_2'] = DecdLayer('3_3', 'dec3_3', 128) - net['dec3_1'] = DecdLayer('3_2', 'dec3_2', 128) - net['dec2_2'] = DecdLayer('3_1', 'dec3_1', 80) + net['dec3_3'] = DecdLayer('4_0-conv', 'enc4_0', 136) + net['dec3_2'] = DecdLayer('3_3', 'dec3_3', 136) + net['dec3_1'] = DecdLayer('3_2', 'dec3_2', 136) + net['dec3_0'] = DecdLayer('3_1', 'dec3_1', 139, nonlinearity=lasagne.nonlinearities.elu) + net['dec2_2'] = DecdLayer('3_0-conv', 'dec3_0', 80) net['dec2_1'] = DecdLayer('2_2', 'dec2_2', 80) - net['dec1_2'] = DecdLayer('2_1', 'dec2_1', 48) + net['dec2_0'] = DecdLayer('2_1', 'dec2_1', 83, nonlinearity=lasagne.nonlinearities.elu) + net['dec1_2'] = DecdLayer('2_0-conv', 'dec2_0', 48) net['dec1_1'] = DecdLayer('1_2', 'dec1_2', 48) - net['dec0_1'] = DecdLayer('1_1', 'dec1_1', 3, nonlinearity=lasagne.nonlinearities.tanh) - net['dec0_0'] = lasagne.layers.ScaleLayer(net['dec0_1'], shared_axes=(0,1,2,3)) - net['out'] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(127.5*(x+1.0), 0.0, 255.0)) 
+ net['dec1_0'] = DecdLayer('1_1', 'dec1_1', 3, nonlinearity=lasagne.nonlinearities.elu) + net['dec0_0'] = lasagne.layers.ScaleLayer(net['dec1_0'], shared_axes=(0,1,2,3)) + net['out'] = lasagne.layers.NonlinearityLayer(net['dec0_0'], nonlinearity=lambda x: T.clip(255.0*(x+0.5), 0.0, 255.0)) def ConcatenateLayer(incoming, layer): # TODO: The model is constructed too soon, we don't yet know if semantic_weight is needed. Fails if not. @@ -189,8 +189,8 @@ def ConcatenateLayer(incoming, layer): # Auxiliary network for the semantic layers, and the nearest neighbors calculations. for layer, upper, lower in zip(args.layers, [None] + args.layers[:-1], args.layers[1:] + [None]): - self.channels[layer] = net['enc%i_1'%layer].num_filters - net['sem%i'%layer] = ConcatenateLayer(net['enc%i_1'%layer], layer) + self.channels[layer] = net['enc%i_0'%layer].num_filters + net['sem%i'%layer] = ConcatenateLayer(net['enc%i_0'%layer], layer) self.network = net def load_data(self): @@ -203,10 +203,17 @@ def load_data(self): data = pickle.load(open(data_file, 'rb')) for layer, values in data.items(): - if layer not in self.network: continue + if '.' in layer: continue # TODO: Filter out from data in first place. + if layer not in self.network: + print('problem', layer) + continue for p, v in zip(self.network[layer].get_params(), values): - assert p.get_value().shape == v.shape, "Layer `{}` in network has size {} but data is {}."\ - .format(layer, p.get_value().shape, v.shape) + ps = p.get_value().shape + if ps != v.shape: + print("Layer `{}` in network has size {} but data is {}.".format(layer, ps, v.shape)) + if len(ps) >= 2 and v.shape[1] > ps[1]: v = v[:,:ps[1]] + if len(ps) == 1 and v.shape[0] > ps[0]: v = v[:ps[0]] + assert ps == v.shape p.set_value(v.astype(np.float32)) def prepare_image(self, image): @@ -214,7 +221,7 @@ def prepare_image(self, image): (b,c,y,x) with batch=1 for a single image, channels=3 for RGB, and y,x matching the resolution. 
""" image = np.swapaxes(np.swapaxes(image, 1, 2), 0, 1)[::-1, :, :] - image = image.astype(np.float32) / 127.5 - 1.0 + image = image.astype(np.float32) / 255.0 - 0.5 return image[np.newaxis] def finalize_image(self, image, resolution): @@ -374,14 +381,15 @@ def prepare_style(self, scale=1.0): for layer, (input, tensor_latent), shape in zip(args.layers, input_tensors, extend(args.shapes)): output = lasagne.layers.get_output(self.model.network['sem%i'%layer], {self.model.network['lat'+input]: tensor_latent, - self.model.network['map']: self.model.tensor_map}) - fn = self.compile([tensor_latent, self.model.tensor_map], [output] + self.compute_norms(T, layer, output)) + self.model.network['map']: self.model.tensor_map, + self.model.network['pool']: self.model.tensor_pool}) + fn = self.compile([tensor_latent, self.model.tensor_pool, self.model.tensor_map], [output] + self.compute_norms(T, layer, output)) self.encoders.append(fn) # Store all the style patches layer by layer, resized to match slice size and cast to 16-bit for size. 
self.style_data, feature = {}, self.style_img for layer, encoder in reversed(list(zip(args.layers, self.encoders))): - feature, *data = encoder(feature, self.style_map) + feature, *data = encoder(feature, self.style_img, self.style_map) self.style_data[layer] = [d.astype(np.float16) for d in [feature]+data]\ + [np.zeros((feature.shape[0],), dtype=np.float16), -1] print(' - Layer {} as {} patches {} in {:,}kb.'.format(layer, feature.shape[:2], feature.shape[2:], feature.size//1000)) @@ -428,11 +436,12 @@ def prepare_content(self, scale=1.0): # Feed-forward calculation only, returns the result of the convolution post-activation self.content_features, feature = [], self.content_img for layer, encoder in reversed(list(zip(args.layers, self.encoders))): - feature, *_ = encoder(feature, self.content_map) + feature, *_ = encoder(feature, self.content_img, self.content_map) feature = feature[:,:self.model.channels[layer]] style = self.style_data[layer][0] self.content_features.insert(0, feature) print(" - Layer {} as {} array in {:,}kb.".format(layer, feature.shape[1:], feature.size//1000)) + print("\t", feature[:,:3].min(), feature[:,:3].max()) def prepare_generation(self): """Layerwise synthesis images requires two sets of Theano functions to be compiled. 
@@ -446,14 +455,15 @@ def prepare_generation(self): def prepare_network(self): self.encoders, input_tensors = [], self.model.tensor_latent[1:] + [('0', self.model.tensor_img)] for name, (input, tensor_latent) in zip(args.layers, input_tensors): - layer = lasagne.layers.get_output(self.model.network['enc%i_1'%name], - {self.model.network['lat'+input]: tensor_latent, - self.model.network['map']: self.model.tensor_map}) - fn = self.compile([tensor_latent, self.model.tensor_map], layer) + layer = lasagne.layers.get_output(self.model.network['enc%i_0'%name], + {self.model.network['lat'+input]: tensor_latent, + self.model.network['map']: self.model.tensor_map, + self.model.network['pool']: self.model.tensor_pool}) + fn = self.compile([tensor_latent, self.model.tensor_pool, self.model.tensor_map], layer) self.encoders.append(fn) # Decoding intermediate features into more specialized features and all the way to the output image. - self.decoders, output_layers = [], (['dec%i_1'%l for l in args.layers[1:]] + ['out']) + self.decoders, output_layers = [], (['dec%i_0'%l for l in args.layers[1:]] + ['out']) for name, (input, tensor_latent), output in zip(args.layers, self.model.tensor_latent, output_layers): layer = lasagne.layers.get_output(self.model.network[output], {self.model.network['lat'+input]: tensor_latent, @@ -501,17 +511,17 @@ def rescale(a): return scipy.ndimage.zoom(np.pad(a, 1, mode='reflect'), 2, order else: indices[:,:,1] = np.random.randint(low=1, high=buffers.shape[2]-1, size=indices.shape[:2]) # TODO: patchsize indices[:,:,2] = np.random.randint(low=1, high=buffers.shape[3]-1, size=indices.shape[:2]) # TODO: patchsize - patches_initialize(f, buffers, indices, scores) + patches_initialize(f[:,3:], buffers[:,3:], indices, scores) if l in self.pm_previous: i, s = self.pm_previous[l] - w = np.where(s > scores) + w = np.where(s > scores) # TODO: add biases indices[w], scores[w] = i[w], s[w] m = scores.mean() for i in itertools.count(): - patches_propagate(f, 
buffers, biases, indices, scores, i) - patches_search(f, buffers, biases, indices, scores, 8) + patches_propagate(f[:,3:], buffers[:,3:], biases, indices, scores, i) + patches_search(f[:,3:], buffers[:,3:], biases, indices, scores, 8) m, s = scores.mean(), m if m - s < args.quality: break @@ -579,6 +589,7 @@ def evaluate(self, Xn): """ self.frame = 0 for i, c in zip(args.layers, self.content_features): + print('rendering layer', i) self.render(c, i, 'orig-L{}'.format(i)) for j in range(args.passes): @@ -592,11 +603,16 @@ def render(self, features, layer, suffix): """Decode features at a specific layer and save the result to disk for visualization. (Takes 50% more time.) """ if not args.frames: return + filename = os.path.splitext(os.path.basename(args.output))[0] + for l, compute in list(zip(args.layers, self.decoders))[args.layers.index(layer):]: + img = features[0,:3][::-1] + print('min', img.min(), 'max', img.max()) + scipy.misc.toimage(img * 255.0 + 127.5, cmin=0, cmax=255).save('frames/raw{}_{}-{:03d}-{}.png'.format(l, filename, self.frame, suffix)) + features = compute(features[:,:self.model.channels[l]], self.content_map) output = self.model.finalize_image(features.reshape(self.content_img.shape[1:]), self.content_shape) - filename = os.path.splitext(os.path.basename(args.output))[0] scipy.misc.toimage(output, cmin=0, cmax=255).save('frames/{}-{:03d}-{}.png'.format(filename, self.frame, suffix)) def run(self): @@ -607,7 +623,7 @@ def run(self): self.prepare_generation() self.prepare_network() - Xn = self.evaluate((self.content_img[0] + 1.0) * 127.5) + Xn = self.evaluate((self.content_img[0] + 0.5) * 255.0) output = self.model.finalize_image(Xn.reshape(self.content_img.shape[1:]), self.content_shape) scipy.misc.toimage(output, cmin=0, cmax=255).save(args.output)